[Mlir-commits] [mlir] dd5165a - [mlir] replace LLVM dialect float types with built-ins

Alex Zinenko llvmlistbot at llvm.org
Fri Jan 8 08:42:23 PST 2021


Author: Alex Zinenko
Date: 2021-01-08T17:38:12+01:00
New Revision: dd5165a920f66268ee509af31fe84efedacdfbf9

URL: https://github.com/llvm/llvm-project/commit/dd5165a920f66268ee509af31fe84efedacdfbf9
DIFF: https://github.com/llvm/llvm-project/commit/dd5165a920f66268ee509af31fe84efedacdfbf9.diff

LOG: [mlir] replace LLVM dialect float types with built-ins

Continue the convergence between LLVM dialect and built-in types by replacing
the bfloat, half, float and double LLVM dialect types with their built-in
counterparts. At the API level, this is a direct replacement. At the syntax
level, we change the keywords to `bf16`, `f16`, `f32` and `f64`, respectively,
to be compatible with the built-in type syntax. The old keywords can still be
parsed but produce a deprecation warning and will be eventually removed.

Depends On D94178

Reviewed By: mehdi_amini, silvas, antiagainst

Differential Revision: https://reviews.llvm.org/D94179

Added: 
    

Modified: 
    mlir/docs/ConversionToLLVMDialect.md
    mlir/docs/Dialects/LLVM.md
    mlir/docs/Dialects/Linalg.md
    mlir/docs/Dialects/Vector.md
    mlir/docs/LLVMDialectMemRefConvention.md
    mlir/docs/SPIRVToLLVMDialectConversion.md
    mlir/docs/Tutorials/Toy/Ch-6.md
    mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h
    mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
    mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
    mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h
    mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir
    mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
    mlir/lib/Conversion/GPUCommon/OpToFuncCallLowering.h
    mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
    mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
    mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
    mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
    mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
    mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
    mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
    mlir/lib/ExecutionEngine/JitRunner.cpp
    mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
    mlir/lib/Target/LLVMIR/TypeTranslation.cpp
    mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir
    mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
    mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
    mlir/test/Conversion/GPUToCUDA/lower-nvvm-kernel-to-cubin.mlir
    mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
    mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
    mlir/test/Conversion/GPUToROCm/lower-rocdl-kernel-to-hsaco.mlir
    mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir
    mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/glsl-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/spirv-types-to-llvm.mlir
    mlir/test/Conversion/StandardToLLVM/calling-convention.mlir
    mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir
    mlir/test/Conversion/StandardToLLVM/convert-funcs.mlir
    mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir
    mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
    mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir
    mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
    mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir
    mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
    mlir/test/Conversion/VectorToROCDL/vector-to-rocdl.mlir
    mlir/test/Dialect/GPU/invalid.mlir
    mlir/test/Dialect/GPU/multiple-all-reduce.mlir
    mlir/test/Dialect/LLVMIR/dialect-cast.mlir
    mlir/test/Dialect/LLVMIR/func.mlir
    mlir/test/Dialect/LLVMIR/global.mlir
    mlir/test/Dialect/LLVMIR/invalid.mlir
    mlir/test/Dialect/LLVMIR/nvvm.mlir
    mlir/test/Dialect/LLVMIR/rocdl.mlir
    mlir/test/Dialect/LLVMIR/roundtrip.mlir
    mlir/test/Dialect/LLVMIR/types-invalid.mlir
    mlir/test/Dialect/LLVMIR/types.mlir
    mlir/test/Dialect/Linalg/llvm.mlir
    mlir/test/Target/avx512.mlir
    mlir/test/Target/import.ll
    mlir/test/Target/llvmir-intrinsics.mlir
    mlir/test/Target/llvmir-invalid.mlir
    mlir/test/Target/llvmir-types.mlir
    mlir/test/Target/llvmir.mlir
    mlir/test/Target/nvvmir.mlir
    mlir/test/Target/openmp-llvm.mlir
    mlir/test/Target/rocdl.mlir
    mlir/test/mlir-cpu-runner/simple.mlir

Removed: 
    


################################################################################
diff  --git a/mlir/docs/ConversionToLLVMDialect.md b/mlir/docs/ConversionToLLVMDialect.md
index db84e9c5be74..d0ea746853b1 100644
--- a/mlir/docs/ConversionToLLVMDialect.md
+++ b/mlir/docs/ConversionToLLVMDialect.md
@@ -25,10 +25,10 @@ Scalar types are converted to their LLVM counterparts if they exist. The
 following conversions are currently implemented:
 
 -   `i*` converts to `!llvm.i*`
--   `bf16` converts to `!llvm.bfloat`
--   `f16` converts to `!llvm.half`
--   `f32` converts to `!llvm.float`
--   `f64` converts to `!llvm.double`
+-   `bf16` converts to `bf16`
+-   `f16` converts to `f16`
+-   `f32` converts to `f32`
+-   `f64` converts to `f64`
 
 ### Index Type
 
@@ -48,8 +48,8 @@ size with element type converted using these conversion rules. In the
 n-dimensional case, MLIR vectors are converted to (n-1)-dimensional array types
 of one-dimensional vectors.
 
-For example, `vector<4 x f32>` converts to `!llvm.vec<4 x float>` and `vector<4
-x 8 x 16 x f32>` converts to `!llvm.array<4 x array<8 x vec<16 x float>>>`.
+For example, `vector<4 x f32>` converts to `!llvm.vec<4 x f32>` and `vector<4 x
+8 x 16 x f32>` converts to `!llvm.array<4 x array<8 x vec<16 x f32>>>`.
 
 ### Ranked Memref Types
 
@@ -106,18 +106,18 @@ resulting in a struct containing two pointers + offset.
 Examples:
 
 ```mlir
-memref<f32> -> !llvm.struct<(ptr<float> , ptr<float>, i64)>
-memref<1 x f32> -> !llvm.struct<(ptr<float>, ptr<float>, i64,
+memref<f32> -> !llvm.struct<(ptr<f32> , ptr<f32>, i64)>
+memref<1 x f32> -> !llvm.struct<(ptr<f32>, ptr<f32>, i64,
                                  array<1 x 64>, array<1 x i64>)>
-memref<? x f32> -> !llvm.struct<(ptr<float>, ptr<float>, i64
+memref<? x f32> -> !llvm.struct<(ptr<f32>, ptr<f32>, i64
                                  array<1 x 64>, array<1 x i64>)>
-memref<10x42x42x43x123 x f32> -> !llvm.struct<(ptr<float>, ptr<float>, i64
+memref<10x42x42x43x123 x f32> -> !llvm.struct<(ptr<f32>, ptr<f32>, i64
                                                array<5 x 64>, array<5 x i64>)>
-memref<10x?x42x?x123 x f32> -> !llvm.struct<(ptr<float>, ptr<float>, i64
+memref<10x?x42x?x123 x f32> -> !llvm.struct<(ptr<f32>, ptr<f32>, i64
                                              array<5 x 64>, array<5 x i64>)>
 
 // Memref types can have vectors as element types
-memref<1x? x vector<4xf32>> -> !llvm.struct<(ptr<vec<4 x float>>,
+memref<1x? x vector<4xf32>> -> !llvm.struct<(ptr<vec<4 x f32>>,
                                              ptr<vec<4 x float>>, i64,
                                              array<1 x i64>, array<1 x i64>)>
 ```
@@ -132,11 +132,11 @@ attribute.
 Examples:
 
 ```mlir
-memref<f32> -> !llvm.ptr<float>
-memref<10x42 x f32> -> !llvm.ptr<float>
+memref<f32> -> !llvm.ptr<f32>
+memref<10x42 x f32> -> !llvm.ptr<f32>
 
 // Memrefs with vector types are also supported.
-memref<10x42 x vector<4xf32>> -> !llvm.ptr<vec<4 x float>>
+memref<10x42 x vector<4xf32>> -> !llvm.ptr<vec<4 x f32>>
 ```
 
 ### Unranked Memref types
@@ -196,12 +196,12 @@ Examples:
 // Binary function with one result:
 (i32, f32) -> (i64)
 // has its arguments handled separately
-!llvm.func<i64 (i32, float)>
+!llvm.func<i64 (i32, f32)>
 
 // Binary function with two results:
 (i32, f32) -> (i64, f64)
 // has its result aggregated into a structure type.
-!llvm.func<struct<(i64, double)> (i32, float)>
+!llvm.func<struct<(i64, f64)> (i32, f32)>
 ```
 
 #### Functions as Function Arguments or Results
@@ -249,19 +249,19 @@ Examples:
 // A memref descriptor appearing as function argument:
 (memref<f32>) -> ()
 // gets converted into a list of individual scalar components of a descriptor.
-!llvm.func<void (ptr<float>, ptr<float>, i64)>
+!llvm.func<void (ptr<f32>, ptr<f32>, i64)>
 
 // The list of arguments is linearized and one can freely mix memref and other
 // types in this list:
 (memref<f32>, f32) -> ()
 // which gets converted into a flat list.
-!llvm.func<void (ptr<float>, ptr<float>, i64, float)>
+!llvm.func<void (ptr<f32>, ptr<f32>, i64, f32)>
 
 // For nD ranked memref descriptors:
 (memref<?x?xf32>) -> ()
 // the converted signature will contain 2n+1 `index`-typed integer arguments,
 // offset, n sizes and n strides, per memref argument type.
-!llvm.func<void (ptr<float>, ptr<float>, i64, i64, i64, i64, i64)>
+!llvm.func<void (ptr<f32>, ptr<f32>, i64, i64, i64, i64, i64)>
 
 // Same rules apply to unranked descriptors:
 (memref<*xf32>) -> ()
@@ -271,12 +271,12 @@ Examples:
 // However, returning a memref from a function is not affected:
 () -> (memref<?xf32>)
 // gets converted to a function returning a descriptor structure.
-!llvm.func<struct<(ptr<float>, ptr<float>, i64, array<1xi64>, array<1xi64>)> ()>
+!llvm.func<struct<(ptr<f32>, ptr<f32>, i64, array<1xi64>, array<1xi64>)> ()>
 
 // If multiple memref-typed results are returned:
 () -> (memref<f32>, memref<f64>)
 // their descriptor structures are additionally packed into another structure,
 // potentially with other non-memref typed results.
-!llvm.func<struct<(struct<(ptr<float>, ptr<float>, i64)>,
+!llvm.func<struct<(struct<(ptr<f32>, ptr<f32>, i64)>,
                    struct<(ptr<double>, ptr<double>, i64)>)> ()>
 ```

diff  --git a/mlir/docs/Dialects/LLVM.md b/mlir/docs/Dialects/LLVM.md
index 666f693c453b..3d91588b2d21 100644
--- a/mlir/docs/Dialects/LLVM.md
+++ b/mlir/docs/Dialects/LLVM.md
@@ -115,7 +115,7 @@ Examples:
 ```mlir
 // Create an undefined value of structure type with a 32-bit integer followed
 // by a float.
-%0 = llvm.mlir.undef : !llvm.struct<(i32, float)>
+%0 = llvm.mlir.undef : !llvm.struct<(i32, f32)>
 
 // Null pointer to i8.
 %1 = llvm.mlir.null : !llvm.ptr<i8>
@@ -127,7 +127,7 @@ Examples:
 %3 = llvm.mlir.constant(42 : i32) : i32
 
 // Splat dense vector constant.
-%3 = llvm.mlir.constant(dense<1.0> : vector<4xf32>) : !llvm.vec<4 x float>
+%3 = llvm.mlir.constant(dense<1.0> : vector<4xf32>) : !llvm.vec<4 x f32>
 ```
 
 Note that constants use built-in types within the initializer definition: MLIR
@@ -214,14 +214,6 @@ containing an 8-bit and a 32-bit integer.
 
 The following non-parametric types are supported.
 
--   `!llvm.bfloat` (`LLVMBFloatType`) - 16-bit “brain” floating-point value
-    (7-bit significand).
--   `!llvm.half` (`LLVMHalfType`) - 16-bit floating-point value as per
-    IEEE-754-2008.
--   `!llvm.float` (`LLVMFloatType`) - 32-bit floating-point value as per
-    IEEE-754-2008.
--   `!llvm.double` (`LLVMDoubleType`) - 64-bit floating-point value as per
-    IEEE-754-2008.
 -   `!llvm.fp128` (`LLVMFP128Type`) - 128-bit floating-point value as per
     IEEE-754-2008.
 -   `!llvm.x86_fp80` (`LLVMX86FP80Type`) - 80-bit floating-point value (x87).
@@ -322,7 +314,7 @@ For example,
 
 ```mlir
 !llvm.func<void ()>            // a function with no arguments;
-!llvm.func<i32 (float, i32)>  // a function with two arguments and a result;
+!llvm.func<i32 (f32, i32)>  // a function with two arguments and a result;
 !llvm.func<void (i32, ...)>   // a variadic function with at least one argument.
 ```
 

diff  --git a/mlir/docs/Dialects/Linalg.md b/mlir/docs/Dialects/Linalg.md
index 18473f4cb796..922455dddbda 100644
--- a/mlir/docs/Dialects/Linalg.md
+++ b/mlir/docs/Dialects/Linalg.md
@@ -429,11 +429,11 @@ func @example(%arg0: !llvm<"float*">, ...) {
 
 llvm.func @pointwise_add(%arg0: !llvm<"float*">, ...) attributes {llvm.emit_c_interface} {
   ...
-  llvm.call @_mlir_ciface_pointwise_add(%9, %19, %29) : (!llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }*">, !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }*">, !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }
+  llvm.call @_mlir_ciface_pointwise_add(%9, %19, %29) : (!llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }*">, !llvm<"{ f32*, f32*, i64, [2 x i64], [2 x i64] }*">, !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }
 *">) -> ()
   llvm.return
 }
-llvm.func @_mlir_ciface_pointwise_add(!llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }*">, !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }*">, !llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }*">) attributes {llvm.emit_c_interface}
+llvm.func @_mlir_ciface_pointwise_add(!llvm<"{ float*, float*, i64, [2 x i64], [2 x i64] }*">, !llvm<"{ f32*, f32*, i64, [2 x i64], [2 x i64] }*">, !llvm<"{ f32*, f32*, i64, [2 x i64], [2 x i64] }*">) attributes {llvm.emit_c_interface}
 ```
 
 ##### Convention For External Library Interoperability

diff  --git a/mlir/docs/Dialects/Vector.md b/mlir/docs/Dialects/Vector.md
index 31a530a913b2..29716f1df7eb 100644
--- a/mlir/docs/Dialects/Vector.md
+++ b/mlir/docs/Dialects/Vector.md
@@ -264,11 +264,11 @@ Consider a vector of rank n with  static sizes `{s_0, ... s_{n-1}}` (i.e. an
 MLIR `vector<s_0x...s_{n-1}xf32>`). Lowering such an `n-D` MLIR vector type to
 an LLVM descriptor can be done by either:
 
-1. Flattening to a `1-D` vector: `!llvm<"(s_0*...*s_{n-1})xfloat">` in the
-MLIR LLVM dialect.
-2. Nested aggregate type of `1-D` vector:
-`!llvm<"[s_0x[s_1x[...<s_{n-1}xfloat>]]]">` in the MLIR LLVM dialect.
-3. A mix of both.
+1.  Flattening to a `1-D` vector: `!llvm<"(s_0*...*s_{n-1})xfloat">` in the MLIR
+    LLVM dialect.
+2.  Nested aggregate type of `1-D` vector:
+    `!llvm<"[s_0x[s_1x[...<s_{n-1}xf32>]]]">` in the MLIR LLVM dialect.
+3.  A mix of both.
 
 There are multiple tradeoffs involved in choosing one or the other that we
 discuss. It is important to note that “a mix of both” immediately reduces to

diff  --git a/mlir/docs/LLVMDialectMemRefConvention.md b/mlir/docs/LLVMDialectMemRefConvention.md
index b52db054225f..78ec6fb00752 100644
--- a/mlir/docs/LLVMDialectMemRefConvention.md
+++ b/mlir/docs/LLVMDialectMemRefConvention.md
@@ -82,11 +82,11 @@ func @foo(%arg0: memref<?xf32>) -> () {
 
 // Gets converted to the following
 // (using type alias for brevity):
-!llvm.memref_1d = type !llvm.struct<(ptr<float>, ptr<float>, i64,
+!llvm.memref_1d = type !llvm.struct<(ptr<f32>, ptr<f32>, i64,
                                      array<1xi64>, array<1xi64>)>
 
-llvm.func @foo(%arg0: !llvm.ptr<float>,  // Allocated pointer.
-               %arg1: !llvm.ptr<float>,  // Aligned pointer.
+llvm.func @foo(%arg0: !llvm.ptr<f32>,  // Allocated pointer.
+               %arg1: !llvm.ptr<f32>,  // Aligned pointer.
                %arg2: i64,         // Offset.
                %arg3: i64,         // Size in dim 0.
                %arg4: i64) {       // Stride in dim 0.
@@ -113,7 +113,7 @@ func @bar() {
 
 // Gets converted to the following
 // (using type alias for brevity):
-!llvm.memref_1d = type !llvm.struct<(ptr<float>, ptr<float>, i64,
+!llvm.memref_1d = type !llvm.struct<(ptr<f32>, ptr<f32>, i64,
                                      array<1xi64>, array<1xi64>)>
 
 llvm.func @bar() {
@@ -264,11 +264,11 @@ func @qux(%arg0: memref<?x?xf32>)
 
 // Gets converted into the following
 // (using type alias for brevity):
-!llvm.memref_2d = type !llvm.struct<(ptr<float>, ptr<float>, i64,
+!llvm.memref_2d = type !llvm.struct<(ptr<f32>, ptr<f32>, i64,
                                      array<2xi64>, array<2xi64>)>
 
 // Function with unpacked arguments.
-llvm.func @qux(%arg0: !llvm.ptr<float>, %arg1: !llvm.ptr<float>,
+llvm.func @qux(%arg0: !llvm.ptr<f32>, %arg1: !llvm.ptr<f32>,
                %arg2: i64, %arg3: i64, %arg4: i64,
                %arg5: i64, %arg6: i64) {
   // Populate memref descriptor (as per calling convention).
@@ -284,14 +284,14 @@ llvm.func @qux(%arg0: !llvm.ptr<float>, %arg1: !llvm.ptr<float>,
   // Store the descriptor in a stack-allocated space.
   %8 = llvm.mlir.constant(1 : index) : i64
   %9 = llvm.alloca %8 x !llvm.memref_2d
-     : (i64) -> !llvm.ptr<struct<(ptr<float>, ptr<float>, i64,
+     : (i64) -> !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64,
                                         array<2xi64>, array<2xi64>)>>
-  llvm.store %7, %9 : !llvm.ptr<struct<(ptr<float>, ptr<float>, i64,
+  llvm.store %7, %9 : !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64,
                                         array<2xi64>, array<2xi64>)>>
 
   // Call the interface function.
   llvm.call @_mlir_ciface_qux(%9)
-     : (!llvm.ptr<struct<(ptr<float>, ptr<float>, i64,
+     : (!llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64,
                           array<2xi64>, array<2xi64>)>>) -> ()
 
   // The stored descriptor will be freed on return.
@@ -299,7 +299,7 @@ llvm.func @qux(%arg0: !llvm.ptr<float>, %arg1: !llvm.ptr<float>,
 }
 
 // Interface function.
-llvm.func @_mlir_ciface_qux(!llvm.ptr<struct<(ptr<float>, ptr<float>, i64,
+llvm.func @_mlir_ciface_qux(!llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64,
                                               array<2xi64>, array<2xi64>)>>)
 ```
 
@@ -310,13 +310,13 @@ func @foo(%arg0: memref<?x?xf32>) {
 
 // Gets converted into the following
 // (using type alias for brevity):
-!llvm.memref_2d = type !llvm.struct<(ptr<float>, ptr<float>, i64,
+!llvm.memref_2d = type !llvm.struct<(ptr<f32>, ptr<f32>, i64,
                                      array<2xi64>, array<2xi64>)>
-!llvm.memref_2d_ptr = type !llvm.ptr<struct<(ptr<float>, ptr<float>, i64,
+!llvm.memref_2d_ptr = type !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64,
                                              array<2xi64>, array<2xi64>)>>
 
 // Function with unpacked arguments.
-llvm.func @foo(%arg0: !llvm.ptr<float>, %arg1: !llvm.ptr<float>,
+llvm.func @foo(%arg0: !llvm.ptr<f32>, %arg1: !llvm.ptr<f32>,
                %arg2: i64, %arg3: i64, %arg4: i64,
                %arg5: i64, %arg6: i64) {
   llvm.return
@@ -336,7 +336,7 @@ llvm.func @_mlir_ciface_foo(%arg0: !llvm.memref_2d_ptr) {
   %6 = llvm.extractvalue %0[4, 0] : !llvm.memref_2d
   %7 = llvm.extractvalue %0[4, 1] : !llvm.memref_2d
   llvm.call @foo(%1, %2, %3, %4, %5, %6, %7)
-    : (!llvm.ptr<float>, !llvm.ptr<float>, i64, i64, i64,
+    : (!llvm.ptr<f32>, !llvm.ptr<f32>, i64, i64, i64,
        i64, i64) -> ()
   llvm.return
 }
@@ -395,7 +395,7 @@ is transformed into the equivalent of the following code:
 // Compute the linearized index from strides.
 // When strides or, in absence of explicit strides, the corresponding sizes are
 // dynamic, extract the stride value from the descriptor.
-%stride1 = llvm.extractvalue[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64,
+%stride1 = llvm.extractvalue[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
                                                    array<4xi64>, array<4xi64>)>
 %addr1 = muli %stride1, %1 : i64
 
@@ -415,21 +415,21 @@ is transformed into the equivalent of the following code:
 
 // If the linear offset is known to be zero, it can also be omitted. If it is
 // dynamic, it is extracted from the descriptor.
-%offset = llvm.extractvalue[2] : !llvm.struct<(ptr<float>, ptr<float>, i64,
+%offset = llvm.extractvalue[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
                                                array<4xi64>, array<4xi64>)>
 %addr7 = addi %addr6, %offset : i64
 
 // All accesses are based on the aligned pointer.
-%aligned = llvm.extractvalue[1] : !llvm.struct<(ptr<float>, ptr<float>, i64,
+%aligned = llvm.extractvalue[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64,
                                                 array<4xi64>, array<4xi64>)>
 
 // Get the address of the data pointer.
 %ptr = llvm.getelementptr %aligned[%addr8]
-     : !llvm.struct<(ptr<float>, ptr<float>, i64, array<4xi64>, array<4xi64>)>
-     -> !llvm.ptr<float>
+     : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<4xi64>, array<4xi64>)>
+     -> !llvm.ptr<f32>
 
 // Perform the actual load.
-%0 = llvm.load %ptr : !llvm.ptr<float>
+%0 = llvm.load %ptr : !llvm.ptr<f32>
 ```
 
 For stores, the address computation code is identical and only the actual store

diff  --git a/mlir/docs/SPIRVToLLVMDialectConversion.md b/mlir/docs/SPIRVToLLVMDialectConversion.md
index 494dcda7d8ab..30188f692b43 100644
--- a/mlir/docs/SPIRVToLLVMDialectConversion.md
+++ b/mlir/docs/SPIRVToLLVMDialectConversion.md
@@ -23,14 +23,14 @@ This section describes how SPIR-V Dialect types are mapped to LLVM Dialect.
 
 ### Scalar types
 
-SPIR-V Dialect                       | LLVM Dialect
-:----------------------------------: | :----------------------------------:
-`i<bitwidth>`                        | `!llvm.i<bitwidth>`
-`si<bitwidth>`                       | `!llvm.i<bitwidth>`
-`ui<bitwidth>`                       | `!llvm.i<bitwidth>`
-`f16`                                | `!llvm.half`
-`f32`                                | `!llvm.float`
-`f64`                                | `!llvm.double`
+SPIR-V Dialect | LLVM Dialect
+:------------: | :-----------------:
+`i<bitwidth>`  | `!llvm.i<bitwidth>`
+`si<bitwidth>` | `!llvm.i<bitwidth>`
+`ui<bitwidth>` | `!llvm.i<bitwidth>`
+`f16`          | `f16`
+`f32`          | `f32`
+`f64`          | `f64`
 
 ### Vector types
 
@@ -442,7 +442,7 @@ order to go through the pointer.
 %i   = ...
 %var = ...
 %0   = llvm.mlir.constant(0 : i32) : i32
-%el  = llvm.getelementptr %var[%0, %i, %i] : (!llvm.ptr<struct<packed (float, array<4 x float>)>>, i32, i32, i32)
+%el  = llvm.getelementptr %var[%0, %i, %i] : (!llvm.ptr<struct<packed (f32, array<4 x f32>)>>, i32, i32, i32)
 ```
 
 #### `spv.Load` and `spv.Store`
@@ -451,25 +451,20 @@ These ops are converted to their LLVM counterparts: `llvm.load` and
 `llvm.store`. If the op has a memory access attribute, then there are the
 following cases, based on the value of the attribute:
 
-* **Aligned**: alignment is passed on to LLVM op builder, for example:
-  ```mlir
-  // llvm.store %ptr, %val {alignment = 4 : i64} : !llvm.ptr<float>
-  spv.Store "Function" %ptr, %val ["Aligned", 4] : f32
-  ```
-* **None**: same case as if there is no memory access attribute.
-
-* **Nontemporal**: set `nontemporal` flag, for example:
-  ```mlir
-  // %res = llvm.load %ptr {nontemporal} : !llvm.ptr<float>
-  %res = spv.Load "Function" %ptr ["Nontemporal"] : f32
-  ```
-* **Volatile**: mark the op as `volatile`, for example:
-  ```mlir
-  // %res = llvm.load volatile %ptr : !llvm.ptr<float>
-  %res = spv.Load "Function" %ptr ["Volatile"] : f32
-  ```
-Otherwise the conversion fails as other cases (`MakePointerAvailable`,
-`MakePointerVisible`, `NonPrivatePointer`) are not supported yet.
+*   **Aligned**: alignment is passed on to LLVM op builder, for example: `mlir
+    // llvm.store %ptr, %val {alignment = 4 : i64} : !llvm.ptr<f32> spv.Store
+    "Function" %ptr, %val ["Aligned", 4] : f32`
+*   **None**: same case as if there is no memory access attribute.
+
+*   **Nontemporal**: set `nontemporal` flag, for example: `mlir // %res =
+    llvm.load %ptr {nontemporal} : !llvm.ptr<f32> %res = spv.Load "Function"
+    %ptr ["Nontemporal"] : f32`
+
+*   **Volatile**: mark the op as `volatile`, for example: `mlir // %res =
+    llvm.load volatile %ptr : !llvm.ptr<f32> %res = spv.Load "Function" %ptr
+    ["Volatile"] : f32` Otherwise the conversion fails as other cases
+    (`MakePointerAvailable`, `MakePointerVisible`, `NonPrivatePointer`) are not
+    supported yet.
 
 #### `spv.globalVariable` and `spv.mlir.addressof`
 
@@ -493,9 +488,9 @@ spv.module Logical GLSL450 {
 
 // Converted result
 module {
-  llvm.mlir.global private @struct() : !llvm.struct<packed (float, [10 x float])>
+  llvm.mlir.global private @struct() : !llvm.struct<packed (f32, [10 x f32])>
   llvm.func @func() {
-    %0 = llvm.mlir.addressof @struct : !llvm.ptr<struct<packed (float, [10 x float])>>
+    %0 = llvm.mlir.addressof @struct : !llvm.ptr<struct<packed (f32, [10 x f32])>>
     llvm.return
   }
 }
@@ -522,7 +517,7 @@ If the global variable's pointer has `Input` storage class, then a `constant`
 flag is added to LLVM op:
 
 ```mlir
-spv.globalVariable @var : !spv.ptr<f32, Input>    =>    llvm.mlir.global external constant @var() : !llvm.float
+spv.globalVariable @var : !spv.ptr<f32, Input>    =>    llvm.mlir.global external constant @var() : f32
 ```
 
 #### `spv.Variable`
@@ -539,7 +534,7 @@ Also, at the moment initialization is only possible via `spv.constant`.
 ```mlir
 // Conversion of VariableOp without initialization
                                                                %size = llvm.mlir.constant(1 : i32) : i32
-%res = spv.Variable : !spv.ptr<vector<3xf32>, Function>   =>   %res  = llvm.alloca  %size x !llvm.vec<3 x float> : (i32) -> !llvm.ptr<vec<3 x float>>
+%res = spv.Variable : !spv.ptr<vector<3xf32>, Function>   =>   %res  = llvm.alloca  %size x !llvm.vec<3 x f32> : (i32) -> !llvm.ptr<vec<3 x f32>>
 
 // Conversion of VariableOp with initialization
                                                                %c    = llvm.mlir.constant(0 : i64) : i64
@@ -664,8 +659,8 @@ the conditional branch.
 `spv.FunctionCall` maps to `llvm.call`. For example:
 
 ```mlir
-%0 = spv.FunctionCall @foo() : () -> i32    =>    %0 = llvm.call @foo() : () -> !llvm.float
-spv.FunctionCall @bar(%0) : (i32) -> ()     =>    llvm.call @bar(%0) : (!llvm.float) -> ()
+%0 = spv.FunctionCall @foo() : () -> i32    =>    %0 = llvm.call @foo() : () -> f32
+spv.FunctionCall @bar(%0) : (i32) -> ()     =>    llvm.call @bar(%0) : (f32) -> ()
 ```
 
 ### `spv.selection` and `spv.loop`
@@ -750,28 +745,31 @@ SPIR-V Dialect op                     | LLVM Dialect op
 ### Special cases
 
 `spv.InverseSqrt` is mapped to:
+
 ```mlir
-                                           %one  = llvm.mlir.constant(1.0 : f32) : !llvm.float
-%res = spv.InverseSqrt %arg : f32    =>    %sqrt = "llvm.intr.sqrt"(%arg) : (!llvm.float) -> !llvm.float
-                                           %res  = fdiv %one, %sqrt : !llvm.float
+                                           %one  = llvm.mlir.constant(1.0 : f32) : f32
+%res = spv.InverseSqrt %arg : f32    =>    %sqrt = "llvm.intr.sqrt"(%arg) : (f32) -> f32
+                                           %res  = fdiv %one, %sqrt : f32
 ```
 
 `spv.Tan` is mapped to:
+
 ```mlir
-                                   %sin = "llvm.intr.sin"(%arg) : (!llvm.float) -> !llvm.float
-%res = spv.Tan %arg : f32    =>    %cos = "llvm.intr.cos"(%arg) : (!llvm.float) -> !llvm.float
-                                   %res = fdiv %sin, %cos : !llvm.float
+                                   %sin = "llvm.intr.sin"(%arg) : (f32) -> f32
+%res = spv.Tan %arg : f32    =>    %cos = "llvm.intr.cos"(%arg) : (f32) -> f32
+                                   %res = fdiv %sin, %cos : f32
 ```
 
 `spv.Tanh` is modelled using the equality `tanh(x) = {exp(2x) - 1}/{exp(2x) + 1}`:
+
 ```mlir
-                                     %two   = llvm.mlir.constant(2.0: f32) : !llvm.float
-                                     %2xArg = llvm.fmul %two, %arg : !llvm.float
-                                     %exp   = "llvm.intr.exp"(%2xArg) : (!llvm.float) -> !llvm.float
-%res = spv.Tanh %arg : f32     =>    %one   = llvm.mlir.constant(1.0 : f32) : !llvm.float
-                                     %num   = llvm.fsub %exp, %one : !llvm.float
-                                     %den   = llvm.fadd %exp, %one : !llvm.float
-                                     %res   = llvm.fdiv %num, %den : !llvm.float
+                                     %two   = llvm.mlir.constant(2.0: f32) : f32
+                                     %2xArg = llvm.fmul %two, %arg : f32
+                                     %exp   = "llvm.intr.exp"(%2xArg) : (f32) -> f32
+%res = spv.Tanh %arg : f32     =>    %one   = llvm.mlir.constant(1.0 : f32) : f32
+                                     %num   = llvm.fsub %exp, %one : f32
+                                     %den   = llvm.fadd %exp, %one : f32
+                                     %res   = llvm.fdiv %num, %den : f32
 ```
 
 ## Function conversion and related ops

diff  --git a/mlir/docs/Tutorials/Toy/Ch-6.md b/mlir/docs/Tutorials/Toy/Ch-6.md
index 43bbb56714d1..1093ae9fe2ba 100644
--- a/mlir/docs/Tutorials/Toy/Ch-6.md
+++ b/mlir/docs/Tutorials/Toy/Ch-6.md
@@ -130,8 +130,8 @@ llvm.func @free(!llvm<"i8*">)
 llvm.func @printf(!llvm<"i8*">, ...) -> i32
 llvm.func @malloc(i64) -> !llvm<"i8*">
 llvm.func @main() {
-  %0 = llvm.mlir.constant(1.000000e+00 : f64) : !llvm.double
-  %1 = llvm.mlir.constant(2.000000e+00 : f64) : !llvm.double
+  %0 = llvm.mlir.constant(1.000000e+00 : f64) : f64
+  %1 = llvm.mlir.constant(2.000000e+00 : f64) : f64
 
   ...
 
@@ -144,9 +144,9 @@ llvm.func @main() {
   %226 = llvm.mlir.constant(1 : index) : i64
   %227 = llvm.mul %219, %226 : i64
   %228 = llvm.add %225, %227 : i64
-  %229 = llvm.getelementptr %221[%228] : (!llvm<"double*">, i64) -> !llvm<"double*">
+  %229 = llvm.getelementptr %221[%228] : (!llvm<"f64*">, i64) -> !llvm<"f64*">
   %230 = llvm.load %229 : !llvm<"double*">
-  %231 = llvm.call @printf(%207, %230) : (!llvm<"i8*">, !llvm.double) -> i32
+  %231 = llvm.call @printf(%207, %230) : (!llvm<"i8*">, f64) -> i32
   %232 = llvm.add %219, %218 : i64
   llvm.br ^bb15(%232 : i64)
 

diff  --git a/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h b/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h
index a48acb93c382..82cf7e772afe 100644
--- a/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h
+++ b/mlir/include/mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h
@@ -150,8 +150,8 @@ class LLVMTypeConverter : public TypeConverter {
   /// Convert an integer type `i*` to `!llvm<"i*">`.
   Type convertIntegerType(IntegerType type);
 
-  /// Convert a floating point type: `f16` to `!llvm.half`, `f32` to
-  /// `!llvm.float` and `f64` to `!llvm.double`.  `bf16` is not supported
+  /// Convert a floating point type: `f16` to `f16`, `f32` to
+  /// `f32` and `f64` to `f64`.  `bf16` is not supported
   /// by LLVM.
   Type convertFloatType(FloatType type);
 
@@ -528,10 +528,10 @@ class ConvertToLLVMPattern : public ConversionPattern {
   /// `strides[1]` = llvm.mlir.constant(1 : index) : i64
   /// `strides[0]` = `sizes[0]`
   /// %size        = llvm.mul `sizes[0]`, `sizes[1]` : i64
-  /// %nullptr     = llvm.mlir.null : !llvm.ptr<float>
+  /// %nullptr     = llvm.mlir.null : !llvm.ptr<f32>
   /// %gep         = llvm.getelementptr %nullptr[%size]
-  ///                  : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-  /// `sizeBytes`  = llvm.ptrtoint %gep : !llvm.ptr<float> to i64
+  ///                  : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+  /// `sizeBytes`  = llvm.ptrtoint %gep : !llvm.ptr<f32> to i64
   void getMemRefDescriptorSizes(Location loc, MemRefType memRefType,
                                 ArrayRef<Value> dynamicSizes,
                                 ConversionPatternRewriter &rewriter,

diff  --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
index a2b807e1697e..0d3f5322531d 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
@@ -82,12 +82,7 @@ def LLVM_PrimitiveType : Type<
 
 // Type constraint accepting any LLVM floating point type.
 def LLVM_AnyFloat : Type<
-  CPred<"$_self.isa<::mlir::LLVM::LLVMBFloatType, "
-                   "::mlir::LLVM::LLVMHalfType, "
-                   "::mlir::LLVM::LLVMFloatType, "
-                   "::mlir::LLVM::LLVMDoubleType, "
-                   "::mlir::LLVM::LLVMFP128Type, "
-                   "::mlir::LLVM::LLVMX86FP80Type>()">,
+  CPred<"::mlir::LLVM::isCompatibleFloatingPointType($_self)">,
   "floating point LLVM type">;
 
 // Type constraint accepting any LLVM pointer type.

diff  --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
index a6dcf79318df..ce91dffe861c 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -466,13 +466,13 @@ def LLVM_CallOp : LLVM_Op<"call",
 
     ```mlir
     // Direct call without arguments and with one result.
-    %0 = llvm.call @foo() : () -> (!llvm.float)
+    %0 = llvm.call @foo() : () -> (f32)
 
     // Direct call with arguments and without a result.
-    llvm.call @bar(%0) : (!llvm.float) -> ()
+    llvm.call @bar(%0) : (f32) -> ()
 
     // Indirect call with an argument and without a result.
-    llvm.call %1(%0) : (!llvm.float) -> ()
+    llvm.call %1(%0) : (f32) -> ()
     ```
   }];
   let arguments = (ins OptionalAttr<FlatSymbolRefAttr>:$callee,
@@ -847,7 +847,7 @@ def LLVM_GlobalOp : LLVM_Op<"mlir.global",
     represented as MLIR attributes can be given in-line:
 
     ```mlir
-    llvm.mlir.global @variable(32.0 : f32) : !llvm.float
+    llvm.mlir.global @variable(32.0 : f32) : f32
     ```
 
     This initialization and type syntax is similar to `llvm.mlir.constant` and
@@ -883,7 +883,7 @@ def LLVM_GlobalOp : LLVM_Op<"mlir.global",
     llvm.mlir.global constant @cst(42 : i32) : i32
 
     // Non-constant values must also be initialized.
-    llvm.mlir.global @variable(32.0 : f32) : !llvm.float
+    llvm.mlir.global @variable(32.0 : f32) : f32
 
     // Strings are expected to be of wrapped LLVM i8 array type and do not
     // automatically include the trailing zero.
@@ -915,7 +915,7 @@ def LLVM_GlobalOp : LLVM_Op<"mlir.global",
 
     // By default, "external" linkage is assumed and the global participates in
     // symbol resolution at link-time.
-    llvm.mlir.global @glob(0 : f32) : !llvm.float
+    llvm.mlir.global @glob(0 : f32) : f32
     ```
   }];
   let regions = (region AnyRegion:$initializer);
@@ -1073,7 +1073,7 @@ def LLVM_UndefOp : LLVM_Op<"mlir.undef", [NoSideEffect]>,
 
     ```mlir
     // Create a structure with a 32-bit integer followed by a float.
-    %0 = llvm.mlir.undef : !llvm.struct<(i32, float)>
+    %0 = llvm.mlir.undef : !llvm.struct<(i32, f32)>
     ```
   }];
   let results = (outs LLVM_Type:$res);
@@ -1108,10 +1108,10 @@ def LLVM_ConstantOp
     %1 = llvm.mlir.constant(42) : i64
 
     // Floating point constant.
-    %2 = llvm.mlir.constant(42.0 : f32) : !llvm.float
+    %2 = llvm.mlir.constant(42.0 : f32) : f32
 
     // Splat dense vector constant.
-    %3 = llvm.mlir.constant(dense<1.0> : vector<4xf32>) : !llvm.vec<4 x float>
+    %3 = llvm.mlir.constant(dense<1.0> : vector<4xf32>) : !llvm.vec<4 x f32>
     ```
   }];
 
@@ -1133,7 +1133,7 @@ def LLVM_DialectCastOp : LLVM_Op<"mlir.cast", [NoSideEffect]> {
     Example:
       llvm.mlir.cast %v : f16 to llvm.half
       llvm.mlir.cast %v : llvm.float to f32
-      llvm.mlir.cast %v : !llvm<"<2 x float>"> to vector<2xf32>
+      llvm.mlir.cast %v : !llvm<"<2 x f32>"> to vector<2xf32>
   }];
   let arguments = (ins AnyType:$in);
   let results = (outs AnyType:$res);

diff  --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h b/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h
index 4c0bee780e76..3cd1733b8d52 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h
@@ -36,13 +36,8 @@ struct LLVMStructTypeStorage;
 struct LLVMTypeAndSizeStorage;
 } // namespace detail
 
-class LLVMBFloatType;
-class LLVMHalfType;
-class LLVMFloatType;
-class LLVMDoubleType;
 class LLVMFP128Type;
 class LLVMX86FP80Type;
-class LLVMIntegerType;
 
 //===----------------------------------------------------------------------===//
 // Trivial types.
@@ -56,10 +51,6 @@ class LLVMIntegerType;
   }
 
 DEFINE_TRIVIAL_LLVM_TYPE(LLVMVoidType);
-DEFINE_TRIVIAL_LLVM_TYPE(LLVMHalfType);
-DEFINE_TRIVIAL_LLVM_TYPE(LLVMBFloatType);
-DEFINE_TRIVIAL_LLVM_TYPE(LLVMFloatType);
-DEFINE_TRIVIAL_LLVM_TYPE(LLVMDoubleType);
 DEFINE_TRIVIAL_LLVM_TYPE(LLVMFP128Type);
 DEFINE_TRIVIAL_LLVM_TYPE(LLVMX86FP80Type);
 DEFINE_TRIVIAL_LLVM_TYPE(LLVMPPCFP128Type);
@@ -389,10 +380,9 @@ void printType(Type type, DialectAsmPrinter &printer);
 /// Returns `true` if the given type is compatible with the LLVM dialect.
 bool isCompatibleType(Type type);
 
-inline bool isCompatibleFloatingPointType(Type type) {
-  return type.isa<LLVMHalfType, LLVMBFloatType, LLVMFloatType, LLVMDoubleType,
-                  LLVMFP128Type, LLVMX86FP80Type>();
-}
+/// Returns `true` if the given type is a floating-point type compatible with
+/// the LLVM dialect.
+bool isCompatibleFloatingPointType(Type type);
 
 /// Returns the size of the given primitive LLVM dialect-compatible type
 /// (including vectors) in bits, for example, the size of i16 is 16 and

diff  --git a/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir b/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir
index 3aa556a832a0..1d076e64ba2b 100644
--- a/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir
+++ b/mlir/integration_test/Dialect/LLVMIR/CPU/test-vector-reductions-fp.mlir
@@ -5,82 +5,82 @@
 // End-to-end test of all fp reduction intrinsics (not exhaustive unit tests).
 module {
   llvm.func @printNewline()
-  llvm.func @printF32(!llvm.float)
+  llvm.func @printF32(f32)
   llvm.func @entry() {
     // Setup (1,2,3,4).
-    %0 = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float
-    %1 = llvm.mlir.constant(2.000000e+00 : f32) : !llvm.float
-    %2 = llvm.mlir.constant(3.000000e+00 : f32) : !llvm.float
-    %3 = llvm.mlir.constant(4.000000e+00 : f32) : !llvm.float
-    %4 = llvm.mlir.undef : !llvm.vec<4 x float>
+    %0 = llvm.mlir.constant(1.000000e+00 : f32) : f32
+    %1 = llvm.mlir.constant(2.000000e+00 : f32) : f32
+    %2 = llvm.mlir.constant(3.000000e+00 : f32) : f32
+    %3 = llvm.mlir.constant(4.000000e+00 : f32) : f32
+    %4 = llvm.mlir.undef : !llvm.vec<4 x f32>
     %5 = llvm.mlir.constant(0 : index) : i64
-    %6 = llvm.insertelement %0, %4[%5 : i64] : !llvm.vec<4 x float>
+    %6 = llvm.insertelement %0, %4[%5 : i64] : !llvm.vec<4 x f32>
     %7 = llvm.shufflevector %6, %4 [0 : i32, 0 : i32, 0 : i32, 0 : i32]
-        : !llvm.vec<4 x float>, !llvm.vec<4 x float>
+        : !llvm.vec<4 x f32>, !llvm.vec<4 x f32>
     %8 = llvm.mlir.constant(1 : i64) : i64
-    %9 = llvm.insertelement %1, %7[%8 : i64] : !llvm.vec<4 x float>
+    %9 = llvm.insertelement %1, %7[%8 : i64] : !llvm.vec<4 x f32>
     %10 = llvm.mlir.constant(2 : i64) : i64
-    %11 = llvm.insertelement %2, %9[%10 : i64] : !llvm.vec<4 x float>
+    %11 = llvm.insertelement %2, %9[%10 : i64] : !llvm.vec<4 x f32>
     %12 = llvm.mlir.constant(3 : i64) : i64
-    %v = llvm.insertelement %3, %11[%12 : i64] : !llvm.vec<4 x float>
+    %v = llvm.insertelement %3, %11[%12 : i64] : !llvm.vec<4 x f32>
 
     %max = "llvm.intr.vector.reduce.fmax"(%v)
-        : (!llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @printF32(%max) : (!llvm.float) -> ()
+        : (!llvm.vec<4 x f32>) -> f32
+    llvm.call @printF32(%max) : (f32) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 4
 
     %min = "llvm.intr.vector.reduce.fmin"(%v)
-        : (!llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @printF32(%min) : (!llvm.float) -> ()
+        : (!llvm.vec<4 x f32>) -> f32
+    llvm.call @printF32(%min) : (f32) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 1
 
     %add1 = "llvm.intr.vector.reduce.fadd"(%0, %v)
-        : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @printF32(%add1) : (!llvm.float) -> ()
+        : (f32, !llvm.vec<4 x f32>) -> f32
+    llvm.call @printF32(%add1) : (f32) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 11
 
     %add1r = "llvm.intr.vector.reduce.fadd"(%0, %v)
-        {reassoc = true} : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @printF32(%add1r) : (!llvm.float) -> ()
+        {reassoc = true} : (f32, !llvm.vec<4 x f32>) -> f32
+    llvm.call @printF32(%add1r) : (f32) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 11
 
     %add2 = "llvm.intr.vector.reduce.fadd"(%1, %v)
-        : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @printF32(%add2) : (!llvm.float) -> ()
+        : (f32, !llvm.vec<4 x f32>) -> f32
+    llvm.call @printF32(%add2) : (f32) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 12
 
     %add2r = "llvm.intr.vector.reduce.fadd"(%1, %v)
-        {reassoc = true} : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @printF32(%add2r) : (!llvm.float) -> ()
+        {reassoc = true} : (f32, !llvm.vec<4 x f32>) -> f32
+    llvm.call @printF32(%add2r) : (f32) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 12
 
     %mul1 = "llvm.intr.vector.reduce.fmul"(%0, %v)
-        : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @printF32(%mul1) : (!llvm.float) -> ()
+        : (f32, !llvm.vec<4 x f32>) -> f32
+    llvm.call @printF32(%mul1) : (f32) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 24
 
     %mul1r = "llvm.intr.vector.reduce.fmul"(%0, %v)
-        {reassoc = true} : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @printF32(%mul1r) : (!llvm.float) -> ()
+        {reassoc = true} : (f32, !llvm.vec<4 x f32>) -> f32
+    llvm.call @printF32(%mul1r) : (f32) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 24
 
     %mul2 = "llvm.intr.vector.reduce.fmul"(%1, %v)
-        : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @printF32(%mul2) : (!llvm.float) -> ()
+        : (f32, !llvm.vec<4 x f32>) -> f32
+    llvm.call @printF32(%mul2) : (f32) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 48
 
     %mul2r = "llvm.intr.vector.reduce.fmul"(%1, %v)
-        {reassoc = true} : (!llvm.float, !llvm.vec<4 x float>) -> !llvm.float
-    llvm.call @printF32(%mul2r) : (!llvm.float) -> ()
+        {reassoc = true} : (f32, !llvm.vec<4 x f32>) -> f32
+    llvm.call @printF32(%mul2r) : (f32) -> ()
     llvm.call @printNewline() : () -> ()
     // CHECK: 48
 

diff  --git a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
index 43537fe62217..92b01cea6fca 100644
--- a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
+++ b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
@@ -947,8 +947,10 @@ class AwaitValueOpLowering : public AwaitOpLoweringBase<AwaitOp, ValueType> {
     // Load from the async value storage.
     auto loaded = rewriter.create<LLVM::LoadOp>(loc, castedStorage.getResult());
 
-    // Cast from LLVM type to the expected value type. This cast will become
-    // no-op after lowering to LLVM.
+    // Cast from LLVM type to the expected value type if necessary. This cast
+    // will become no-op after lowering to LLVM.
+    if (valueType == loaded.getType())
+      return loaded;
     return rewriter.create<LLVM::DialectCastOp>(loc, valueType, loaded);
   }
 };

diff  --git a/mlir/lib/Conversion/GPUCommon/OpToFuncCallLowering.h b/mlir/lib/Conversion/GPUCommon/OpToFuncCallLowering.h
index 5b98d3cee1fb..f98009d65e84 100644
--- a/mlir/lib/Conversion/GPUCommon/OpToFuncCallLowering.h
+++ b/mlir/lib/Conversion/GPUCommon/OpToFuncCallLowering.h
@@ -27,7 +27,7 @@ namespace mlir {
 ///   %exp_f32 = std.exp %arg_f32 : f32
 ///
 /// will be transformed into
-///   llvm.call @__nv_expf(%arg_f32) : (!llvm.float) -> !llvm.float
+///   llvm.call @__nv_expf(%arg_f32) : (f32) -> f32
 template <typename SourceOp>
 struct OpToFuncCallLowering : public ConvertOpToLLVMPattern<SourceOp> {
 public:
@@ -79,12 +79,11 @@ struct OpToFuncCallLowering : public ConvertOpToLLVMPattern<SourceOp> {
 private:
   Value maybeCast(Value operand, PatternRewriter &rewriter) const {
     Type type = operand.getType();
-    if (!type.isa<LLVM::LLVMHalfType>())
+    if (!type.isa<Float16Type>())
       return operand;
 
     return rewriter.create<LLVM::FPExtOp>(
-        operand.getLoc(), LLVM::LLVMFloatType::get(rewriter.getContext()),
-        operand);
+        operand.getLoc(), Float32Type::get(rewriter.getContext()), operand);
   }
 
   Type getFunctionType(Type resultType, ArrayRef<Value> operands) const {
@@ -96,9 +95,9 @@ struct OpToFuncCallLowering : public ConvertOpToLLVMPattern<SourceOp> {
   }
 
   StringRef getFunctionName(Type type) const {
-    if (type.isa<LLVM::LLVMFloatType>())
+    if (type.isa<Float32Type>())
       return f32Func;
-    if (type.isa<LLVM::LLVMDoubleType>())
+    if (type.isa<Float64Type>())
       return f64Func;
     return "";
   }

diff  --git a/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp b/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
index eb5cf94b4751..47968ea458ce 100644
--- a/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
+++ b/mlir/lib/Conversion/GPUToVulkan/ConvertLaunchFuncToVulkanCalls.cpp
@@ -57,7 +57,7 @@ class VulkanLaunchFuncToVulkanCallsPass
           VulkanLaunchFuncToVulkanCallsPass> {
 private:
   void initializeCachedTypes() {
-    llvmFloatType = LLVM::LLVMFloatType::get(&getContext());
+    llvmFloatType = Float32Type::get(&getContext());
     llvmVoidType = LLVM::LLVMVoidType::get(&getContext());
     llvmPointerType =
         LLVM::LLVMPointerType::get(IntegerType::get(&getContext(), 8));
@@ -132,9 +132,9 @@ class VulkanLaunchFuncToVulkanCallsPass
 
   /// Returns a string representation from the given `type`.
   StringRef stringifyType(Type type) {
-    if (type.isa<LLVM::LLVMFloatType>())
+    if (type.isa<Float32Type>())
       return "Float";
-    if (type.isa<LLVM::LLVMHalfType>())
+    if (type.isa<Float16Type>())
       return "Half";
     if (auto intType = type.dyn_cast<IntegerType>()) {
       if (intType.getWidth() == 32)
@@ -241,7 +241,7 @@ void VulkanLaunchFuncToVulkanCallsPass::createBindMemRefCalls(
         llvm::formatv("bindMemRef{0}D{1}", rank, stringifyType(type)).str();
     // Special case for fp16 type. Since it is not a supported type in C we use
     // int16_t and bitcast the descriptor.
-    if (type.isa<LLVM::LLVMHalfType>()) {
+    if (type.isa<Float16Type>()) {
       auto memRefTy = getMemRefType(rank, IntegerType::get(&getContext(), 16));
       ptrToMemRefDescriptor = builder.create<LLVM::BitcastOp>(
           loc, LLVM::LLVMPointerType::get(memRefTy), ptrToMemRefDescriptor);
@@ -323,15 +323,14 @@ void VulkanLaunchFuncToVulkanCallsPass::declareVulkanFunctions(Location loc) {
   }
 
   for (unsigned i = 1; i <= 3; i++) {
-    SmallVector<Type, 5> types{LLVM::LLVMFloatType::get(&getContext()),
-                               IntegerType::get(&getContext(), 32),
-                               IntegerType::get(&getContext(), 16),
-                               IntegerType::get(&getContext(), 8),
-                               LLVM::LLVMHalfType::get(&getContext())};
+    SmallVector<Type, 5> types{
+        Float32Type::get(&getContext()), IntegerType::get(&getContext(), 32),
+        IntegerType::get(&getContext(), 16), IntegerType::get(&getContext(), 8),
+        Float16Type::get(&getContext())};
     for (auto type : types) {
       std::string fnName = "bindMemRef" + std::to_string(i) + "D" +
                            std::string(stringifyType(type));
-      if (type.isa<LLVM::LLVMHalfType>())
+      if (type.isa<Float16Type>())
         type = IntegerType::get(&getContext(), 16);
       if (!module.lookupSymbol(fnName)) {
         auto fnType = LLVM::LLVMFunctionType::get(

diff  --git a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
index e73bc669cd50..8023a8009758 100644
--- a/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
+++ b/mlir/lib/Conversion/StandardToLLVM/StandardToLLVM.cpp
@@ -189,17 +189,7 @@ Type LLVMTypeConverter::convertIntegerType(IntegerType type) {
   return IntegerType::get(&getContext(), type.getWidth());
 }
 
-Type LLVMTypeConverter::convertFloatType(FloatType type) {
-  if (type.isa<Float32Type>())
-    return LLVM::LLVMFloatType::get(&getContext());
-  if (type.isa<Float64Type>())
-    return LLVM::LLVMDoubleType::get(&getContext());
-  if (type.isa<Float16Type>())
-    return LLVM::LLVMHalfType::get(&getContext());
-  if (type.isa<BFloat16Type>())
-    return LLVM::LLVMBFloatType::get(&getContext());
-  llvm_unreachable("non-float type in convertFloatType");
-}
+Type LLVMTypeConverter::convertFloatType(FloatType type) { return type; }
 
 // Convert a `ComplexType` to an LLVM type. The result is a complex number
 // struct with entries for the
@@ -402,8 +392,8 @@ Type LLVMTypeConverter::convertMemRefToBarePtr(BaseMemRefType type) {
 
 // Convert an n-D vector type to an LLVM vector type via (n-1)-D array type when
 // n > 1.
-// For example, `vector<4 x f32>` converts to `!llvm.type<"<4 x float>">` and
-// `vector<4 x 8 x 16 f32>` converts to `!llvm<"[4 x [8 x <16 x float>]]">`.
+// For example, `vector<4 x f32>` converts to `!llvm.type<"<4 x f32>">` and
+// `vector<4 x 8 x 16 f32>` converts to `!llvm<"[4 x [8 x <16 x f32>]]">`.
 Type LLVMTypeConverter::convertVectorType(VectorType type) {
   auto elementType = unwrap(convertType(type.getElementType()));
   if (!elementType)

diff  --git a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
index 5b66a31eaeaf..5ad266ce0bd7 100644
--- a/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
+++ b/mlir/lib/Conversion/VectorToLLVM/ConvertVectorToLLVM.cpp
@@ -768,8 +768,8 @@ class VectorExtractOpConversion
 /// is converted to:
 /// ```
 ///  llvm.intr.fmuladd %va, %va, %va:
-///    (!llvm<"<8 x float>">, !llvm<"<8 x float>">, !llvm<"<8 x float>">)
-///    -> !llvm<"<8 x float>">
+///    (!llvm<"<8 x f32>">, !llvm<"<8 x f32>">, !llvm<"<8 x f32>">)
+///    -> !llvm<"<8 x f32>">
 /// ```
 class VectorFMAOp1DConversion : public ConvertOpToLLVMPattern<vector::FMAOp> {
 public:
@@ -1420,11 +1420,10 @@ class VectorPrintOpConversion : public ConvertOpToLLVMPattern<vector::PrintOp> {
     return getPrint(op, "printU64", IntegerType::get(op->getContext(), 64));
   }
   Operation *getPrintFloat(Operation *op) const {
-    return getPrint(op, "printF32", LLVM::LLVMFloatType::get(op->getContext()));
+    return getPrint(op, "printF32", Float32Type::get(op->getContext()));
   }
   Operation *getPrintDouble(Operation *op) const {
-    return getPrint(op, "printF64",
-                    LLVM::LLVMDoubleType::get(op->getContext()));
+    return getPrint(op, "printF64", Float64Type::get(op->getContext()));
   }
   Operation *getPrintOpen(Operation *op) const {
     return getPrint(op, "printOpen", {});

diff  --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
index 2e8f2bf16461..3c9329ff0eb5 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -1258,8 +1258,16 @@ static LogicalResult verifyCastWithIndex(Type llvmType) {
 }
 
 /// Checks if `llvmType` is dialect cast-compatible with built-in `type` and
-/// reports errors to the location of `op`.
-static LogicalResult verifyCast(DialectCastOp op, Type llvmType, Type type) {
+/// reports errors to the location of `op`. `isElement` indicates whether the
+/// verification is performed for types that are element types inside a
+/// container; we don't want casts from X to X at the top level, but c1<X> to
+/// c2<X> may be fine.
+static LogicalResult verifyCast(DialectCastOp op, Type llvmType, Type type,
+                                bool isElement = false) {
+  // Equal element types are directly compatible.
+  if (isElement && llvmType == type)
+    return success();
+
   // Index is compatible with any integer.
   if (type.isIndex()) {
     if (succeeded(verifyCastWithIndex(llvmType)))
@@ -1268,32 +1276,6 @@ static LogicalResult verifyCast(DialectCastOp op, Type llvmType, Type type) {
     return op.emitOpError("invalid cast between index and non-integer type");
   }
 
-  // Simple one-to-one mappings for floating point types.
-  if (type.isF16()) {
-    if (llvmType.isa<LLVMHalfType>())
-      return success();
-    return op.emitOpError(
-        "invalid cast between f16 and a type other than !llvm.half");
-  }
-  if (type.isBF16()) {
-    if (llvmType.isa<LLVMBFloatType>())
-      return success();
-    return op->emitOpError(
-        "invalid cast between bf16 and a type other than !llvm.bfloat");
-  }
-  if (type.isF32()) {
-    if (llvmType.isa<LLVMFloatType>())
-      return success();
-    return op->emitOpError(
-        "invalid cast between f32 and a type other than !llvm.float");
-  }
-  if (type.isF64()) {
-    if (llvmType.isa<LLVMDoubleType>())
-      return success();
-    return op->emitOpError(
-        "invalid cast between f64 and a type other than !llvm.double");
-  }
-
   // Vectors are compatible if they are 1D non-scalable, and their element types
   // are compatible.
   if (auto vectorType = type.dyn_cast<VectorType>()) {
@@ -1309,7 +1291,7 @@ static LogicalResult verifyCast(DialectCastOp op, Type llvmType, Type type) {
           "invalid cast between vectors with mismatching sizes");
 
     return verifyCast(op, llvmVector.getElementType(),
-                      vectorType.getElementType());
+                      vectorType.getElementType(), /*isElement=*/true);
   }
 
   if (auto memrefType = type.dyn_cast<MemRefType>()) {
@@ -1324,7 +1306,7 @@ static LogicalResult verifyCast(DialectCastOp op, Type llvmType, Type type) {
                             "different memory spaces");
 
       return verifyCast(op, ptrType.getElementType(),
-                        memrefType.getElementType());
+                        memrefType.getElementType(), /*isElement=*/true);
     }
 
     // Otherwise, memrefs are convertible to a descriptor, which is a structure
@@ -1347,7 +1329,7 @@ static LogicalResult verifyCast(DialectCastOp op, Type llvmType, Type type) {
       return op->emitOpError("expected first element of a memref descriptor to "
                              "be a pointer in the address space of the memref");
     if (failed(verifyCast(op, allocatedPtr.getElementType(),
-                          memrefType.getElementType())))
+                          memrefType.getElementType(), /*isElement=*/true)))
       return failure();
 
     auto alignedPtr = structType.getBody()[1].dyn_cast<LLVMPointerType>();
@@ -1357,7 +1339,7 @@ static LogicalResult verifyCast(DialectCastOp op, Type llvmType, Type type) {
           "expected second element of a memref descriptor to "
           "be a pointer in the address space of the memref");
     if (failed(verifyCast(op, alignedPtr.getElementType(),
-                          memrefType.getElementType())))
+                          memrefType.getElementType(), /*isElement=*/true)))
       return failure();
 
     // The second element (offset) is an equivalent of index.
@@ -1946,9 +1928,9 @@ static LogicalResult verify(AtomicRMWOp op) {
     auto intType = valType.dyn_cast<IntegerType>();
     unsigned intBitWidth = intType ? intType.getWidth() : 0;
     if (intBitWidth != 8 && intBitWidth != 16 && intBitWidth != 32 &&
-        intBitWidth != 64 && !valType.isa<LLVMBFloatType>() &&
-        !valType.isa<LLVMHalfType>() && !valType.isa<LLVMFloatType>() &&
-        !valType.isa<LLVMDoubleType>())
+        intBitWidth != 64 && !valType.isa<BFloat16Type>() &&
+        !valType.isa<Float16Type>() && !valType.isa<Float32Type>() &&
+        !valType.isa<Float64Type>())
       return op.emitOpError("unexpected LLVM IR type for 'xchg' bin_op");
   } else {
     auto intType = valType.dyn_cast<IntegerType>();
@@ -2014,8 +1996,8 @@ static LogicalResult verify(AtomicCmpXchgOp op) {
   unsigned intBitWidth = intType ? intType.getWidth() : 0;
   if (!valType.isa<LLVMPointerType>() && intBitWidth != 8 &&
       intBitWidth != 16 && intBitWidth != 32 && intBitWidth != 64 &&
-      !valType.isa<LLVMBFloatType>() && !valType.isa<LLVMHalfType>() &&
-      !valType.isa<LLVMFloatType>() && !valType.isa<LLVMDoubleType>())
+      !valType.isa<BFloat16Type>() && !valType.isa<Float16Type>() &&
+      !valType.isa<Float32Type>() && !valType.isa<Float64Type>())
     return op.emitOpError("unexpected LLVM IR type");
   if (op.success_ordering() < AtomicOrdering::monotonic ||
       op.failure_ordering() < AtomicOrdering::monotonic)
@@ -2076,10 +2058,6 @@ void LLVMDialect::initialize() {
 
   // clang-format off
   addTypes<LLVMVoidType,
-           LLVMHalfType,
-           LLVMBFloatType,
-           LLVMFloatType,
-           LLVMDoubleType,
            LLVMFP128Type,
            LLVMX86FP80Type,
            LLVMPPCFP128Type,

diff  --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
index 6f5ea1d813a1..18a4262bcaf8 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypeSyntax.cpp
@@ -24,7 +24,8 @@ using namespace mlir::LLVM;
 /// internal functions to avoid getting a verbose `!llvm` prefix. Otherwise
 /// prints it as usual.
 static void dispatchPrint(DialectAsmPrinter &printer, Type type) {
-  if (isCompatibleType(type) && !type.isa<IntegerType>())
+  if (isCompatibleType(type) && !type.isa<IntegerType>() &&
+      !type.isa<FloatType>())
     return mlir::LLVM::detail::printType(type, printer);
   printer.printType(type);
 }
@@ -33,10 +34,6 @@ static void dispatchPrint(DialectAsmPrinter &printer, Type type) {
 static StringRef getTypeKeyword(Type type) {
   return TypeSwitch<Type, StringRef>(type)
       .Case<LLVMVoidType>([&](Type) { return "void"; })
-      .Case<LLVMHalfType>([&](Type) { return "half"; })
-      .Case<LLVMBFloatType>([&](Type) { return "bfloat"; })
-      .Case<LLVMFloatType>([&](Type) { return "float"; })
-      .Case<LLVMDoubleType>([&](Type) { return "double"; })
       .Case<LLVMFP128Type>([&](Type) { return "fp128"; })
       .Case<LLVMX86FP80Type>([&](Type) { return "x86_fp80"; })
       .Case<LLVMPPCFP128Type>([&](Type) { return "ppc_fp128"; })
@@ -412,6 +409,7 @@ static LLVMStructType parseStructType(DialectAsmParser &parser) {
 /// LLVM dialect types without the `!llvm` prefix.
 static Type dispatchParse(DialectAsmParser &parser, bool allowAny = true) {
   llvm::SMLoc keyLoc = parser.getCurrentLocation();
+  Location loc = parser.getEncodedSourceLoc(keyLoc);
 
   // Try parsing any MLIR type.
   Type type;
@@ -427,7 +425,6 @@ static Type dispatchParse(DialectAsmParser &parser, bool allowAny = true) {
         parser.emitError(keyLoc) << "unexpected type, expected keyword";
         return nullptr;
       }
-      Location loc = parser.getEncodedSourceLoc(keyLoc);
       emitWarning(loc) << "deprecated syntax, drop '!llvm.' for integers";
     }
     return type;
@@ -441,10 +438,26 @@ static Type dispatchParse(DialectAsmParser &parser, bool allowAny = true) {
   MLIRContext *ctx = parser.getBuilder().getContext();
   return StringSwitch<function_ref<Type()>>(key)
       .Case("void", [&] { return LLVMVoidType::get(ctx); })
-      .Case("half", [&] { return LLVMHalfType::get(ctx); })
-      .Case("bfloat", [&] { return LLVMBFloatType::get(ctx); })
-      .Case("float", [&] { return LLVMFloatType::get(ctx); })
-      .Case("double", [&] { return LLVMDoubleType::get(ctx); })
+      .Case("bfloat",
+            [&] {
+              emitWarning(loc) << "deprecated syntax, use bf16 instead";
+              return BFloat16Type::get(ctx);
+            })
+      .Case("half",
+            [&] {
+              emitWarning(loc) << "deprecated syntax, use f16 instead";
+              return Float16Type::get(ctx);
+            })
+      .Case("float",
+            [&] {
+              emitWarning(loc) << "deprecated syntax, use f32 instead";
+              return Float32Type::get(ctx);
+            })
+      .Case("double",
+            [&] {
+              emitWarning(loc) << "deprecated syntax, use f64 instead";
+              return Float64Type::get(ctx);
+            })
       .Case("fp128", [&] { return LLVMFP128Type::get(ctx); })
       .Case("x86_fp80", [&] { return LLVMX86FP80Type::get(ctx); })
       .Case("ppc_fp128", [&] { return LLVMPPCFP128Type::get(ctx); })

diff  --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
index d6a037f363b6..ce6f052eb871 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
@@ -318,13 +318,13 @@ bool mlir::LLVM::isCompatibleType(Type type) {
 
   // clang-format off
   return type.isa<
+      BFloat16Type,
+      Float16Type,
+      Float32Type,
+      Float64Type,
       LLVMArrayType,
-      LLVMBFloatType,
-      LLVMDoubleType,
       LLVMFP128Type,
-      LLVMFloatType,
       LLVMFunctionType,
-      LLVMHalfType,
       LLVMLabelType,
       LLVMMetadataType,
       LLVMPPCFP128Type,
@@ -339,15 +339,20 @@ bool mlir::LLVM::isCompatibleType(Type type) {
   // clang-format on
 }
 
+bool mlir::LLVM::isCompatibleFloatingPointType(Type type) {
+  return type.isa<BFloat16Type, Float16Type, Float32Type, Float64Type,
+                  LLVMFP128Type, LLVMPPCFP128Type, LLVMX86FP80Type>();
+}
+
 llvm::TypeSize mlir::LLVM::getPrimitiveTypeSizeInBits(Type type) {
   assert(isCompatibleType(type) &&
          "expected a type compatible with the LLVM dialect");
 
   return llvm::TypeSwitch<Type, llvm::TypeSize>(type)
-      .Case<LLVMHalfType, LLVMBFloatType>(
+      .Case<BFloat16Type, Float16Type>(
           [](Type) { return llvm::TypeSize::Fixed(16); })
-      .Case<LLVMFloatType>([](Type) { return llvm::TypeSize::Fixed(32); })
-      .Case<LLVMDoubleType, LLVMX86MMXType>(
+      .Case<Float32Type>([](Type) { return llvm::TypeSize::Fixed(32); })
+      .Case<Float64Type, LLVMX86MMXType>(
           [](Type) { return llvm::TypeSize::Fixed(64); })
       .Case<IntegerType>([](IntegerType intTy) {
         return llvm::TypeSize::Fixed(intTy.getWidth());

diff  --git a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
index b451623e628a..8bfbbf857b70 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/NVVMDialect.cpp
@@ -86,9 +86,9 @@ static ParseResult parseNVVMVoteBallotOp(OpAsmParser &parser,
 
 static LogicalResult verify(MmaOp op) {
   MLIRContext *context = op.getContext();
-  auto f16Ty = LLVM::LLVMHalfType::get(context);
+  auto f16Ty = Float16Type::get(context);
   auto f16x2Ty = LLVM::LLVMFixedVectorType::get(f16Ty, 2);
-  auto f32Ty = LLVM::LLVMFloatType::get(context);
+  auto f32Ty = Float32Type::get(context);
   auto f16x2x4StructTy = LLVM::LLVMStructType::getLiteral(
       context, {f16x2Ty, f16x2Ty, f16x2Ty, f16x2Ty});
   auto f32x8StructTy = LLVM::LLVMStructType::getLiteral(

diff  --git a/mlir/lib/ExecutionEngine/JitRunner.cpp b/mlir/lib/ExecutionEngine/JitRunner.cpp
index c7548b0d8a85..08d947b3c6f7 100644
--- a/mlir/lib/ExecutionEngine/JitRunner.cpp
+++ b/mlir/lib/ExecutionEngine/JitRunner.cpp
@@ -201,7 +201,7 @@ Error checkCompatibleReturnType<int32_t>(LLVM::LLVMFuncOp mainFunction) {
                         .getReturnType()
                         .dyn_cast<IntegerType>();
   if (!resultType || resultType.getWidth() != 32)
-    return make_string_error("only single llvm.i32 function result supported");
+    return make_string_error("only single i32 function result supported");
   return Error::success();
 }
 template <>
@@ -211,7 +211,7 @@ Error checkCompatibleReturnType<int64_t>(LLVM::LLVMFuncOp mainFunction) {
                         .getReturnType()
                         .dyn_cast<IntegerType>();
   if (!resultType || resultType.getWidth() != 64)
-    return make_string_error("only single llvm.i64 function result supported");
+    return make_string_error("only single i64 function result supported");
   return Error::success();
 }
 template <>
@@ -219,8 +219,8 @@ Error checkCompatibleReturnType<float>(LLVM::LLVMFuncOp mainFunction) {
   if (!mainFunction.getType()
            .cast<LLVM::LLVMFunctionType>()
            .getReturnType()
-           .isa<LLVM::LLVMFloatType>())
-    return make_string_error("only single llvm.f32 function result supported");
+           .isa<Float32Type>())
+    return make_string_error("only single f32 function result supported");
   return Error::success();
 }
 template <typename Type>

diff  --git a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
index 0159d2a57c7d..93a587bf8e64 100644
--- a/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
+++ b/mlir/lib/Target/LLVMIR/ConvertFromLLVMIR.cpp
@@ -172,14 +172,8 @@ Type Importer::getStdTypeForAttr(Type type) {
   if (!type)
     return nullptr;
 
-  if (auto intType = type.dyn_cast<IntegerType>())
-    return intType;
-
-  if (type.isa<LLVMFloatType>())
-    return b.getF32Type();
-
-  if (type.isa<LLVMDoubleType>())
-    return b.getF64Type();
+  if (type.isa<IntegerType, FloatType>())
+    return type;
 
   // LLVM vectors can only contain scalars.
   if (auto vectorType = type.dyn_cast<LLVM::LLVMVectorType>()) {
@@ -269,7 +263,7 @@ Attribute Importer::getConstantAsAttr(llvm::Constant *value) {
       return DenseElementsAttr::get(attrType, values);
     }
 
-    if (type.isa<LLVMFloatType>() || type.isa<LLVMDoubleType>()) {
+    if (type.isa<Float32Type, Float64Type>()) {
       SmallVector<APFloat, 8> values;
       values.reserve(cd->getNumElements());
       for (unsigned i = 0, e = cd->getNumElements(); i < e; ++i)

diff  --git a/mlir/lib/Target/LLVMIR/TypeTranslation.cpp b/mlir/lib/Target/LLVMIR/TypeTranslation.cpp
index 2c024dd2489e..bb773f09cf2c 100644
--- a/mlir/lib/Target/LLVMIR/TypeTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/TypeTranslation.cpp
@@ -39,16 +39,14 @@ class TypeToLLVMIRTranslatorImpl {
             .Case([this](LLVM::LLVMVoidType) {
               return llvm::Type::getVoidTy(context);
             })
-            .Case([this](LLVM::LLVMHalfType) {
-              return llvm::Type::getHalfTy(context);
-            })
-            .Case([this](LLVM::LLVMBFloatType) {
+            .Case(
+                [this](Float16Type) { return llvm::Type::getHalfTy(context); })
+            .Case([this](BFloat16Type) {
               return llvm::Type::getBFloatTy(context);
             })
-            .Case([this](LLVM::LLVMFloatType) {
-              return llvm::Type::getFloatTy(context);
-            })
-            .Case([this](LLVM::LLVMDoubleType) {
+            .Case(
+                [this](Float32Type) { return llvm::Type::getFloatTy(context); })
+            .Case([this](Float64Type) {
               return llvm::Type::getDoubleTy(context);
             })
             .Case([this](LLVM::LLVMFP128Type) {
@@ -215,13 +213,13 @@ class TypeFromLLVMIRTranslatorImpl {
     if (type->isVoidTy())
       return LLVM::LLVMVoidType::get(&context);
     if (type->isHalfTy())
-      return LLVM::LLVMHalfType::get(&context);
+      return Float16Type::get(&context);
     if (type->isBFloatTy())
-      return LLVM::LLVMBFloatType::get(&context);
+      return BFloat16Type::get(&context);
     if (type->isFloatTy())
-      return LLVM::LLVMFloatType::get(&context);
+      return Float32Type::get(&context);
     if (type->isDoubleTy())
-      return LLVM::LLVMDoubleType::get(&context);
+      return Float64Type::get(&context);
     if (type->isFP128Ty())
       return LLVM::LLVMFP128Type::get(&context);
     if (type->isX86_FP80Ty())

diff  --git a/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir b/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir
index 75b323490cbb..f47671b969a8 100644
--- a/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir
+++ b/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir
@@ -224,8 +224,7 @@ func @execute_and_return_f32() -> f32 {
 
   // CHECK: %[[STORAGE:.*]] = call @mlirAsyncRuntimeGetValueStorage(%[[RET]]#1)
   // CHECK: %[[ST_F32:.*]] = llvm.bitcast %[[STORAGE]]
-  // CHECK: %[[LOADED:.*]] = llvm.load %[[ST_F32]] :  !llvm.ptr<float>
-  // CHECK: %[[CASTED:.*]] = llvm.mlir.cast %[[LOADED]] : !llvm.float to f32
+  // CHECK: %[[LOADED:.*]] = llvm.load %[[ST_F32]] :  !llvm.ptr<f32>
   %0 = async.await %result : !async.value<f32>
 
   return %0 : f32
@@ -243,10 +242,9 @@ func @execute_and_return_f32() -> f32 {
 
 // Emplace result value.
 // CHECK: %[[CST:.*]] = constant 1.230000e+02 : f32
-// CHECK: %[[LLVM_CST:.*]] = llvm.mlir.cast %[[CST]] : f32 to !llvm.float
 // CHECK: %[[STORAGE:.*]] = call @mlirAsyncRuntimeGetValueStorage(%[[VALUE]])
 // CHECK: %[[ST_F32:.*]] = llvm.bitcast %[[STORAGE]]
-// CHECK: llvm.store %[[LLVM_CST]], %[[ST_F32]] : !llvm.ptr<float>
+// CHECK: llvm.store %[[CST]], %[[ST_F32]] : !llvm.ptr<f32>
 // CHECK: call @mlirAsyncRuntimeEmplaceValue(%[[VALUE]])
 
 // Emplace result token.
@@ -295,9 +293,8 @@ func @async_value_operands() {
 // Get the operand value storage, cast to f32 and add the value.
 // CHECK: %[[STORAGE:.*]] = call @mlirAsyncRuntimeGetValueStorage(%arg0)
 // CHECK: %[[ST_F32:.*]] = llvm.bitcast %[[STORAGE]]
-// CHECK: %[[LOADED:.*]] = llvm.load %[[ST_F32]] :  !llvm.ptr<float>
-// CHECK: %[[CASTED:.*]] = llvm.mlir.cast %[[LOADED]] : !llvm.float to f32
-// CHECK: addf %[[CASTED]], %[[CASTED]] : f32
+// CHECK: %[[LOADED:.*]] = llvm.load %[[ST_F32]] :  !llvm.ptr<f32>
+// CHECK: addf %[[LOADED]], %[[LOADED]] : f32
 
 // Emplace result token.
 // CHECK: call @mlirAsyncRuntimeEmplaceToken(%[[TOKEN]])

diff  --git a/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir b/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
index 14db10856393..634385cf1a64 100644
--- a/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
+++ b/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
@@ -10,8 +10,8 @@ module attributes {gpu.container_module} {
   gpu.module @kernel_module attributes {
       nvvm.cubin = "CUBIN", rocdl.hsaco = "HSACO"
   } {
-    llvm.func @kernel(%arg0: i32, %arg1: !llvm.ptr<float>,
-        %arg2: !llvm.ptr<float>, %arg3: i64, %arg4: i64,
+    llvm.func @kernel(%arg0: i32, %arg1: !llvm.ptr<f32>,
+        %arg2: !llvm.ptr<f32>, %arg3: i64, %arg4: i64,
         %arg5: i64) attributes {gpu.kernel} {
       llvm.return
     }

diff  --git a/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir b/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
index 215a39edfddb..7f9b8b5fd76d 100644
--- a/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
+++ b/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
@@ -6,13 +6,13 @@ gpu.module @kernel {
   gpu.func @private(%arg0: f32) private(%arg1: memref<4xf32, 5>) {
     // Allocate private memory inside the function.
     // NVVM: %[[size:.*]] = llvm.mlir.constant(4 : i64) : i64
-    // NVVM: %[[raw:.*]] = llvm.alloca %[[size]] x !llvm.float : (i64) -> !llvm.ptr<float>
+    // NVVM: %[[raw:.*]] = llvm.alloca %[[size]] x f32 : (i64) -> !llvm.ptr<f32>
 
     // ROCDL: %[[size:.*]] = llvm.mlir.constant(4 : i64) : i64
-    // ROCDL: %[[raw:.*]] = llvm.alloca %[[size]] x !llvm.float : (i64) -> !llvm.ptr<float, 5>
+    // ROCDL: %[[raw:.*]] = llvm.alloca %[[size]] x f32 : (i64) -> !llvm.ptr<f32, 5>
 
     // Populate the memref descriptor.
-    // NVVM: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
+    // NVVM: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
     // NVVM: %[[descr2:.*]] = llvm.insertvalue %[[raw]], %[[descr1]][0]
     // NVVM: %[[descr3:.*]] = llvm.insertvalue %[[raw]], %[[descr2]][1]
     // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
@@ -22,7 +22,7 @@ gpu.module @kernel {
     // NVVM: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
     // NVVM: %[[descr6:.*]] = llvm.insertvalue %[[c1]], %[[descr5]][4, 0]
 
-    // ROCDL: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float, 5>, ptr<float, 5>, i64, array<1 x i64>, array<1 x i64>)>
+    // ROCDL: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32, 5>, ptr<f32, 5>, i64, array<1 x i64>, array<1 x i64>)>
     // ROCDL: %[[descr2:.*]] = llvm.insertvalue %[[raw]], %[[descr1]][0]
     // ROCDL: %[[descr3:.*]] = llvm.insertvalue %[[raw]], %[[descr2]][1]
     // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
@@ -54,11 +54,11 @@ gpu.module @kernel {
   // Workgroup buffers are allocated as globals.
   // NVVM: llvm.mlir.global internal @[[$buffer:.*]]()
   // NVVM-SAME:  addr_space = 3
-  // NVVM-SAME:  !llvm.array<4 x float>
+  // NVVM-SAME:  !llvm.array<4 x f32>
 
   // ROCDL: llvm.mlir.global internal @[[$buffer:.*]]()
   // ROCDL-SAME:  addr_space = 3
-  // ROCDL-SAME:  !llvm.array<4 x float>
+  // ROCDL-SAME:  !llvm.array<4 x f32>
 
   // NVVM-LABEL: llvm.func @workgroup
   // NVVM-SAME: {
@@ -68,17 +68,17 @@ gpu.module @kernel {
   gpu.func @workgroup(%arg0: f32) workgroup(%arg1: memref<4xf32, 3>) {
     // Get the address of the first element in the global array.
     // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32
-    // NVVM: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr<array<4 x float>, 3>
+    // NVVM: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr<array<4 x f32>, 3>
     // NVVM: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]]
-    // NVVM-SAME: !llvm.ptr<float, 3>
+    // NVVM-SAME: !llvm.ptr<f32, 3>
 
     // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32
-    // ROCDL: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr<array<4 x float>, 3>
+    // ROCDL: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr<array<4 x f32>, 3>
     // ROCDL: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]]
-    // ROCDL-SAME: !llvm.ptr<float, 3>
+    // ROCDL-SAME: !llvm.ptr<f32, 3>
 
     // Populate the memref descriptor.
-    // NVVM: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<1 x i64>, array<1 x i64>)>
+    // NVVM: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<1 x i64>, array<1 x i64>)>
     // NVVM: %[[descr2:.*]] = llvm.insertvalue %[[raw]], %[[descr1]][0]
     // NVVM: %[[descr3:.*]] = llvm.insertvalue %[[raw]], %[[descr2]][1]
     // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
@@ -88,7 +88,7 @@ gpu.module @kernel {
     // NVVM: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
     // NVVM: %[[descr6:.*]] = llvm.insertvalue %[[c1]], %[[descr5]][4, 0]
 
-    // ROCDL: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<1 x i64>, array<1 x i64>)>
+    // ROCDL: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<1 x i64>, array<1 x i64>)>
     // ROCDL: %[[descr2:.*]] = llvm.insertvalue %[[raw]], %[[descr1]][0]
     // ROCDL: %[[descr3:.*]] = llvm.insertvalue %[[raw]], %[[descr2]][1]
     // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
@@ -120,28 +120,28 @@ gpu.module @kernel {
   // Check that the total size was computed correctly.
   // NVVM: llvm.mlir.global internal @[[$buffer:.*]]()
   // NVVM-SAME:  addr_space = 3
-  // NVVM-SAME:  !llvm.array<48 x float>
+  // NVVM-SAME:  !llvm.array<48 x f32>
 
   // ROCDL: llvm.mlir.global internal @[[$buffer:.*]]()
   // ROCDL-SAME:  addr_space = 3
-  // ROCDL-SAME:  !llvm.array<48 x float>
+  // ROCDL-SAME:  !llvm.array<48 x f32>
 
   // NVVM-LABEL: llvm.func @workgroup3d
   // ROCDL-LABEL: llvm.func @workgroup3d
   gpu.func @workgroup3d(%arg0: f32) workgroup(%arg1: memref<4x2x6xf32, 3>) {
     // Get the address of the first element in the global array.
     // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32
-    // NVVM: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr<array<48 x float>, 3>
+    // NVVM: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr<array<48 x f32>, 3>
     // NVVM: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]]
-    // NVVM-SAME: !llvm.ptr<float, 3>
+    // NVVM-SAME: !llvm.ptr<f32, 3>
 
     // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32
-    // ROCDL: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr<array<48 x float>, 3>
+    // ROCDL: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr<array<48 x f32>, 3>
     // ROCDL: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]]
-    // ROCDL-SAME: !llvm.ptr<float, 3>
+    // ROCDL-SAME: !llvm.ptr<f32, 3>
 
     // Populate the memref descriptor.
-    // NVVM: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<3 x i64>, array<3 x i64>)>
+    // NVVM: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<3 x i64>, array<3 x i64>)>
     // NVVM: %[[descr2:.*]] = llvm.insertvalue %[[raw]], %[[descr1]][0]
     // NVVM: %[[descr3:.*]] = llvm.insertvalue %[[raw]], %[[descr2]][1]
     // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
@@ -159,7 +159,7 @@ gpu.module @kernel {
     // NVVM: %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
     // NVVM: %[[descr10:.*]] = llvm.insertvalue %[[c1]], %[[descr9]][4, 2]
 
-    // ROCDL: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<3 x i64>, array<3 x i64>)>
+    // ROCDL: %[[descr1:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<3 x i64>, array<3 x i64>)>
     // ROCDL: %[[descr2:.*]] = llvm.insertvalue %[[raw]], %[[descr1]][0]
     // ROCDL: %[[descr3:.*]] = llvm.insertvalue %[[raw]], %[[descr2]][1]
     // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
@@ -188,14 +188,14 @@ gpu.module @kernel {
 gpu.module @kernel {
   // Check that several buffers are defined.
   // NVVM: llvm.mlir.global internal @[[$buffer1:.*]]()
-  // NVVM-SAME:  !llvm.array<1 x float>
+  // NVVM-SAME:  !llvm.array<1 x f32>
   // NVVM: llvm.mlir.global internal @[[$buffer2:.*]]()
-  // NVVM-SAME:  !llvm.array<2 x float>
+  // NVVM-SAME:  !llvm.array<2 x f32>
 
   // ROCDL: llvm.mlir.global internal @[[$buffer1:.*]]()
-  // ROCDL-SAME:  !llvm.array<1 x float>
+  // ROCDL-SAME:  !llvm.array<1 x f32>
   // ROCDL: llvm.mlir.global internal @[[$buffer2:.*]]()
-  // ROCDL-SAME:  !llvm.array<2 x float>
+  // ROCDL-SAME:  !llvm.array<2 x f32>
 
   // NVVM-LABEL: llvm.func @multiple
   // ROCDL-LABEL: llvm.func @multiple
@@ -212,14 +212,14 @@ gpu.module @kernel {
 
     // Private buffers.
     // NVVM: %[[c3:.*]] = llvm.mlir.constant(3 : i64)
-    // NVVM: llvm.alloca %[[c3]] x !llvm.float : (i64) -> !llvm.ptr<float>
+    // NVVM: llvm.alloca %[[c3]] x f32 : (i64) -> !llvm.ptr<f32>
     // NVVM: %[[c4:.*]] = llvm.mlir.constant(4 : i64)
-    // NVVM: llvm.alloca %[[c4]] x !llvm.float : (i64) -> !llvm.ptr<float>
+    // NVVM: llvm.alloca %[[c4]] x f32 : (i64) -> !llvm.ptr<f32>
 
     // ROCDL: %[[c3:.*]] = llvm.mlir.constant(3 : i64)
-    // ROCDL: llvm.alloca %[[c3]] x !llvm.float : (i64) -> !llvm.ptr<float, 5>
+    // ROCDL: llvm.alloca %[[c3]] x f32 : (i64) -> !llvm.ptr<f32, 5>
     // ROCDL: %[[c4:.*]] = llvm.mlir.constant(4 : i64)
-    // ROCDL: llvm.alloca %[[c4]] x !llvm.float : (i64) -> !llvm.ptr<float, 5>
+    // ROCDL: llvm.alloca %[[c4]] x f32 : (i64) -> !llvm.ptr<f32, 5>
 
     %c0 = constant 0 : index
     store %arg0, %arg1[%c0] : memref<1xf32, 3>

diff  --git a/mlir/test/Conversion/GPUToCUDA/lower-nvvm-kernel-to-cubin.mlir b/mlir/test/Conversion/GPUToCUDA/lower-nvvm-kernel-to-cubin.mlir
index d103031fbd54..c5c92da3cac0 100644
--- a/mlir/test/Conversion/GPUToCUDA/lower-nvvm-kernel-to-cubin.mlir
+++ b/mlir/test/Conversion/GPUToCUDA/lower-nvvm-kernel-to-cubin.mlir
@@ -2,7 +2,7 @@
 
 // CHECK: attributes {nvvm.cubin = "CUBIN"}
 gpu.module @foo {
-  llvm.func @kernel(%arg0 : !llvm.float, %arg1 : !llvm.ptr<float>)
+  llvm.func @kernel(%arg0 : f32, %arg1 : !llvm.ptr<f32>)
     // CHECK: attributes  {gpu.kernel}
     attributes  { gpu.kernel } {
     llvm.return

diff  --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
index 3a48f2e759af..e740aabaee99 100644
--- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
@@ -110,7 +110,7 @@ gpu.module @test_module {
 gpu.module @test_module {
   // CHECK-LABEL: func @gpu_shuffle()
   func @gpu_shuffle() -> (f32) {
-    // CHECK: %[[#VALUE:]] = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float
+    // CHECK: %[[#VALUE:]] = llvm.mlir.constant(1.000000e+00 : f32) : f32
     %arg0 = constant 1.0 : f32
     // CHECK: %[[#OFFSET:]] = llvm.mlir.constant(4 : i32) : i32
     %arg1 = constant 4 : i32
@@ -120,9 +120,9 @@ gpu.module @test_module {
     // CHECK: %[[#SHL:]] = llvm.shl %[[#ONE]], %[[#WIDTH]] : i32
     // CHECK: %[[#MASK:]] = llvm.sub %[[#SHL]], %[[#ONE]] : i32
     // CHECK: %[[#CLAMP:]] = llvm.sub %[[#WIDTH]], %[[#ONE]] : i32
-    // CHECK: %[[#SHFL:]] = nvvm.shfl.sync.bfly %[[#MASK]], %[[#VALUE]], %[[#OFFSET]], %[[#CLAMP]] : !llvm.struct<(float, i1)>
-    // CHECK: llvm.extractvalue %[[#SHFL]][0 : index] : !llvm.struct<(float, i1)>
-    // CHECK: llvm.extractvalue %[[#SHFL]][1 : index] : !llvm.struct<(float, i1)>
+    // CHECK: %[[#SHFL:]] = nvvm.shfl.sync.bfly %[[#MASK]], %[[#VALUE]], %[[#OFFSET]], %[[#CLAMP]] : !llvm.struct<(f32, i1)>
+    // CHECK: llvm.extractvalue %[[#SHFL]][0 : index] : !llvm.struct<(f32, i1)>
+    // CHECK: llvm.extractvalue %[[#SHFL]][1 : index] : !llvm.struct<(f32, i1)>
     %shfl, %pred = "gpu.shuffle"(%arg0, %arg1, %arg2) { mode = "xor" } : (f32, i32, i32) -> (f32, i1)
 
     std.return %shfl : f32
@@ -143,14 +143,14 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__nv_fabsf(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__nv_fabs(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__nv_fabsf(f32) -> f32
+  // CHECK: llvm.func @__nv_fabs(f64) -> f64
   // CHECK-LABEL: func @gpu_fabs
   func @gpu_fabs(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.absf %arg_f32 : f32
-    // CHECK: llvm.call @__nv_fabsf(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__nv_fabsf(%{{.*}}) : (f32) -> f32
     %result64 = std.absf %arg_f64 : f64
-    // CHECK: llvm.call @__nv_fabs(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__nv_fabs(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
@@ -158,14 +158,14 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__nv_ceilf(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__nv_ceil(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__nv_ceilf(f32) -> f32
+  // CHECK: llvm.func @__nv_ceil(f64) -> f64
   // CHECK-LABEL: func @gpu_ceil
   func @gpu_ceil(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.ceilf %arg_f32 : f32
-    // CHECK: llvm.call @__nv_ceilf(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__nv_ceilf(%{{.*}}) : (f32) -> f32
     %result64 = std.ceilf %arg_f64 : f64
-    // CHECK: llvm.call @__nv_ceil(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__nv_ceil(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
@@ -173,14 +173,14 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__nv_floorf(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__nv_floor(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__nv_floorf(f32) -> f32
+  // CHECK: llvm.func @__nv_floor(f64) -> f64
   // CHECK-LABEL: func @gpu_floor
   func @gpu_floor(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.floorf %arg_f32 : f32
-    // CHECK: llvm.call @__nv_floorf(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__nv_floorf(%{{.*}}) : (f32) -> f32
     %result64 = std.floorf %arg_f64 : f64
-    // CHECK: llvm.call @__nv_floor(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__nv_floor(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
@@ -188,28 +188,28 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__nv_cosf(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__nv_cos(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__nv_cosf(f32) -> f32
+  // CHECK: llvm.func @__nv_cos(f64) -> f64
   // CHECK-LABEL: func @gpu_cos
   func @gpu_cos(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.cos %arg_f32 : f32
-    // CHECK: llvm.call @__nv_cosf(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__nv_cosf(%{{.*}}) : (f32) -> f32
     %result64 = std.cos %arg_f64 : f64
-    // CHECK: llvm.call @__nv_cos(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__nv_cos(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
 
 // -----
 gpu.module @test_module {
-  // CHECK: llvm.func @__nv_expf(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__nv_exp(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__nv_expf(f32) -> f32
+  // CHECK: llvm.func @__nv_exp(f64) -> f64
   // CHECK-LABEL: func @gpu_exp
   func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.exp %arg_f32 : f32
-    // CHECK: llvm.call @__nv_expf(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__nv_expf(%{{.*}}) : (f32) -> f32
     %result64 = std.exp %arg_f64 : f64
-    // CHECK: llvm.call @__nv_exp(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__nv_exp(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
@@ -217,14 +217,14 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__nv_logf(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__nv_log(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__nv_logf(f32) -> f32
+  // CHECK: llvm.func @__nv_log(f64) -> f64
   // CHECK-LABEL: func @gpu_log
   func @gpu_log(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.log %arg_f32 : f32
-    // CHECK: llvm.call @__nv_logf(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__nv_logf(%{{.*}}) : (f32) -> f32
     %result64 = std.log %arg_f64 : f64
-    // CHECK: llvm.call @__nv_log(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__nv_log(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
@@ -232,14 +232,14 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__nv_log10f(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__nv_log10(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__nv_log10f(f32) -> f32
+  // CHECK: llvm.func @__nv_log10(f64) -> f64
   // CHECK-LABEL: func @gpu_log10
   func @gpu_log10(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.log10 %arg_f32 : f32
-    // CHECK: llvm.call @__nv_log10f(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__nv_log10f(%{{.*}}) : (f32) -> f32
     %result64 = std.log10 %arg_f64 : f64
-    // CHECK: llvm.call @__nv_log10(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__nv_log10(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
@@ -247,14 +247,14 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__nv_log2f(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__nv_log2(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__nv_log2f(f32) -> f32
+  // CHECK: llvm.func @__nv_log2(f64) -> f64
   // CHECK-LABEL: func @gpu_log2
   func @gpu_log2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.log2 %arg_f32 : f32
-    // CHECK: llvm.call @__nv_log2f(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__nv_log2f(%{{.*}}) : (f32) -> f32
     %result64 = std.log2 %arg_f64 : f64
-    // CHECK: llvm.call @__nv_log2(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__nv_log2(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
@@ -262,14 +262,14 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__nv_sinf(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__nv_sin(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__nv_sinf(f32) -> f32
+  // CHECK: llvm.func @__nv_sin(f64) -> f64
   // CHECK-LABEL: func @gpu_sin
   func @gpu_sin(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.sin %arg_f32 : f32
-    // CHECK: llvm.call @__nv_sinf(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__nv_sinf(%{{.*}}) : (f32) -> f32
     %result64 = std.sin %arg_f64 : f64
-    // CHECK: llvm.call @__nv_sin(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__nv_sin(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
@@ -277,18 +277,18 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__nv_tanhf(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__nv_tanh(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__nv_tanhf(f32) -> f32
+  // CHECK: llvm.func @__nv_tanh(f64) -> f64
   // CHECK-LABEL: func @gpu_tanh
   func @gpu_tanh(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) -> (f16, f32, f64) {
     %result16 = std.tanh %arg_f16 : f16
-    // CHECK: llvm.fpext %{{.*}} : !llvm.half to !llvm.float
-    // CHECK-NEXT: llvm.call @__nv_tanhf(%{{.*}}) : (!llvm.float) -> !llvm.float
-    // CHECK-NEXT: llvm.fptrunc %{{.*}} : !llvm.float to !llvm.half
+    // CHECK: llvm.fpext %{{.*}} : f16 to f32
+    // CHECK-NEXT: llvm.call @__nv_tanhf(%{{.*}}) : (f32) -> f32
+    // CHECK-NEXT: llvm.fptrunc %{{.*}} : f32 to f16
     %result32 = std.tanh %arg_f32 : f32
-    // CHECK: llvm.call @__nv_tanhf(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__nv_tanhf(%{{.*}}) : (f32) -> f32
     %result64 = std.tanh %arg_f64 : f64
-    // CHECK: llvm.call @__nv_tanh(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__nv_tanh(%{{.*}}) : (f64) -> f64
     std.return %result16, %result32, %result64 : f16, f32, f64
   }
 }
@@ -296,19 +296,19 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__nv_rsqrtf(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__nv_rsqrt(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__nv_rsqrtf(f32) -> f32
+  // CHECK: llvm.func @__nv_rsqrt(f64) -> f64
   // CHECK-LABEL: func @gpu_rsqrt
   func @gpu_rsqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64)
       -> (f16, f32, f64) {
     %result16 = std.rsqrt %arg_f16 : f16
-    // CHECK: llvm.fpext %{{.*}} : !llvm.half to !llvm.float
-    // CHECK-NEXT: llvm.call @__nv_rsqrtf(%{{.*}}) : (!llvm.float) -> !llvm.float
-    // CHECK-NEXT: llvm.fptrunc %{{.*}} : !llvm.float to !llvm.half
+    // CHECK: llvm.fpext %{{.*}} : f16 to f32
+    // CHECK-NEXT: llvm.call @__nv_rsqrtf(%{{.*}}) : (f32) -> f32
+    // CHECK-NEXT: llvm.fptrunc %{{.*}} : f32 to f16
     %result32 = std.rsqrt %arg_f32 : f32
-    // CHECK: llvm.call @__nv_rsqrtf(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__nv_rsqrtf(%{{.*}}) : (f32) -> f32
     %result64 = std.rsqrt %arg_f64 : f64
-    // CHECK: llvm.call @__nv_rsqrt(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__nv_rsqrt(%{{.*}}) : (f64) -> f64
     std.return %result16, %result32, %result64 : f16, f32, f64
   }
 }
@@ -316,19 +316,19 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__nv_sqrtf(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__nv_sqrt(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__nv_sqrtf(f32) -> f32
+  // CHECK: llvm.func @__nv_sqrt(f64) -> f64
   // CHECK-LABEL: func @gpu_sqrt
   func @gpu_sqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64)
       -> (f16, f32, f64) {
     %result16 = std.sqrt %arg_f16 : f16
-    // CHECK: llvm.fpext %{{.*}} : !llvm.half to !llvm.float
-    // CHECK-NEXT: llvm.call @__nv_sqrtf(%{{.*}}) : (!llvm.float) -> !llvm.float
-    // CHECK-NEXT: llvm.fptrunc %{{.*}} : !llvm.float to !llvm.half
+    // CHECK: llvm.fpext %{{.*}} : f16 to f32
+    // CHECK-NEXT: llvm.call @__nv_sqrtf(%{{.*}}) : (f32) -> f32
+    // CHECK-NEXT: llvm.fptrunc %{{.*}} : f32 to f16
     %result32 = std.sqrt %arg_f32 : f32
-    // CHECK: llvm.call @__nv_sqrtf(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__nv_sqrtf(%{{.*}}) : (f32) -> f32
     %result64 = std.sqrt %arg_f64 : f64
-    // CHECK: llvm.call @__nv_sqrt(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__nv_sqrt(%{{.*}}) : (f64) -> f64
     std.return %result16, %result32, %result64 : f16, f32, f64
   }
 }
@@ -336,19 +336,19 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__nv_atanf(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__nv_atan(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__nv_atanf(f32) -> f32
+  // CHECK: llvm.func @__nv_atan(f64) -> f64
   // CHECK-LABEL: func @gpu_atan
   func @gpu_atan(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64)
       -> (f16, f32, f64) {
     %result16 = std.atan %arg_f16 : f16
-    // CHECK: llvm.fpext %{{.*}} : !llvm.half to !llvm.float
-    // CHECK-NEXT: llvm.call @__nv_atanf(%{{.*}}) : (!llvm.float) -> !llvm.float
-    // CHECK-NEXT: llvm.fptrunc %{{.*}} : !llvm.float to !llvm.half
+    // CHECK: llvm.fpext %{{.*}} : f16 to f32
+    // CHECK-NEXT: llvm.call @__nv_atanf(%{{.*}}) : (f32) -> f32
+    // CHECK-NEXT: llvm.fptrunc %{{.*}} : f32 to f16
     %result32 = std.atan %arg_f32 : f32
-    // CHECK: llvm.call @__nv_atanf(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__nv_atanf(%{{.*}}) : (f32) -> f32
     %result64 = std.atan %arg_f64 : f64
-    // CHECK: llvm.call @__nv_atan(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__nv_atan(%{{.*}}) : (f64) -> f64
     std.return %result16, %result32, %result64 : f16, f32, f64
   }
 }
@@ -356,20 +356,20 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__nv_atan2f(!llvm.float, !llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__nv_atan2(!llvm.double, !llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__nv_atan2f(f32, f32) -> f32
+  // CHECK: llvm.func @__nv_atan2(f64, f64) -> f64
   // CHECK-LABEL: func @gpu_atan2
   func @gpu_atan2(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64)
       -> (f16, f32, f64) {
     %result16 = std.atan2 %arg_f16, %arg_f16 : f16
-    // CHECK: llvm.fpext %{{.*}} : !llvm.half to !llvm.float
-    // CHECK: llvm.fpext %{{.*}} : !llvm.half to !llvm.float
-    // CHECK-NEXT: llvm.call @__nv_atan2f(%{{.*}}) : (!llvm.float, !llvm.float) -> !llvm.float
-    // CHECK-NEXT: llvm.fptrunc %{{.*}} : !llvm.float to !llvm.half
+    // CHECK: llvm.fpext %{{.*}} : f16 to f32
+    // CHECK: llvm.fpext %{{.*}} : f16 to f32
+    // CHECK-NEXT: llvm.call @__nv_atan2f(%{{.*}}) : (f32, f32) -> f32
+    // CHECK-NEXT: llvm.fptrunc %{{.*}} : f32 to f16
     %result32 = std.atan2 %arg_f32, %arg_f32 : f32
-    // CHECK: llvm.call @__nv_atan2f(%{{.*}}) : (!llvm.float, !llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__nv_atan2f(%{{.*}}) : (f32, f32) -> f32
     %result64 = std.atan2 %arg_f64, %arg_f64 : f64
-    // CHECK: llvm.call @__nv_atan2(%{{.*}}) : (!llvm.double, !llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__nv_atan2(%{{.*}}) : (f64, f64) -> f64
     std.return %result16, %result32, %result64 : f16, f32, f64
   }
 }
@@ -380,14 +380,14 @@ gpu.module @test_module {
 gpu.module @test_module {
   "test.symbol_scope"() ({
   // CHECK: test.symbol_scope
-  // CHECK: llvm.func @__nv_expf(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__nv_exp(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__nv_expf(f32) -> f32
+  // CHECK: llvm.func @__nv_exp(f64) -> f64
   // CHECK-LABEL: func @gpu_exp
     func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
       %result32 = std.exp %arg_f32 : f32
-      // CHECK: llvm.call @__nv_expf(%{{.*}}) : (!llvm.float) -> !llvm.float
+      // CHECK: llvm.call @__nv_expf(%{{.*}}) : (f32) -> f32
       %result64 = std.exp %arg_f64 : f64
-      // CHECK: llvm.call @__nv_exp(%{{.*}}) : (!llvm.double) -> !llvm.double
+      // CHECK: llvm.call @__nv_exp(%{{.*}}) : (f64) -> f64
       std.return %result32, %result64 : f32, f64
     }
     "test.finish" () : () -> ()
@@ -397,14 +397,14 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__nv_powf(!llvm.float, !llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__nv_pow(!llvm.double, !llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__nv_powf(f32, f32) -> f32
+  // CHECK: llvm.func @__nv_pow(f64, f64) -> f64
   // CHECK-LABEL: func @gpu_pow
   func @gpu_pow(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.powf %arg_f32, %arg_f32 : f32
-    // CHECK: llvm.call @__nv_powf(%{{.*}}, %{{.*}}) : (!llvm.float, !llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__nv_powf(%{{.*}}, %{{.*}}) : (f32, f32) -> f32
     %result64 = std.powf %arg_f64, %arg_f64 : f64
-    // CHECK: llvm.call @__nv_pow(%{{.*}}, %{{.*}}) : (!llvm.double, !llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__nv_pow(%{{.*}}, %{{.*}}) : (f64, f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }

diff  --git a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
index a65bef80e363..b3613503531d 100644
--- a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
+++ b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
@@ -85,14 +85,14 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__ocml_fabs_f32(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__ocml_fabs_f64(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__ocml_fabs_f32(f32) -> f32
+  // CHECK: llvm.func @__ocml_fabs_f64(f64) -> f64
   // CHECK-LABEL: func @gpu_fabs
   func @gpu_fabs(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.absf %arg_f32 : f32
-    // CHECK: llvm.call @__ocml_fabs_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__ocml_fabs_f32(%{{.*}}) : (f32) -> f32
     %result64 = std.absf %arg_f64 : f64
-    // CHECK: llvm.call @__ocml_fabs_f64(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__ocml_fabs_f64(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
@@ -100,14 +100,14 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__ocml_ceil_f32(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__ocml_ceil_f64(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__ocml_ceil_f32(f32) -> f32
+  // CHECK: llvm.func @__ocml_ceil_f64(f64) -> f64
   // CHECK-LABEL: func @gpu_ceil
   func @gpu_ceil(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.ceilf %arg_f32 : f32
-    // CHECK: llvm.call @__ocml_ceil_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__ocml_ceil_f32(%{{.*}}) : (f32) -> f32
     %result64 = std.ceilf %arg_f64 : f64
-    // CHECK: llvm.call @__ocml_ceil_f64(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__ocml_ceil_f64(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
@@ -115,14 +115,14 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__ocml_floor_f32(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__ocml_floor_f64(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__ocml_floor_f32(f32) -> f32
+  // CHECK: llvm.func @__ocml_floor_f64(f64) -> f64
   // CHECK-LABEL: func @gpu_floor
   func @gpu_floor(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.floorf %arg_f32 : f32
-    // CHECK: llvm.call @__ocml_floor_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__ocml_floor_f32(%{{.*}}) : (f32) -> f32
     %result64 = std.floorf %arg_f64 : f64
-    // CHECK: llvm.call @__ocml_floor_f64(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__ocml_floor_f64(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
@@ -130,30 +130,30 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__ocml_cos_f32(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__ocml_cos_f64(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__ocml_cos_f32(f32) -> f32
+  // CHECK: llvm.func @__ocml_cos_f64(f64) -> f64
   // CHECK-LABEL: func @gpu_cos
   func @gpu_cos(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.cos %arg_f32 : f32
-    // CHECK: llvm.call @__ocml_cos_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__ocml_cos_f32(%{{.*}}) : (f32) -> f32
     %result64 = std.cos %arg_f64 : f64
-    // CHECK: llvm.call @__ocml_cos_f64(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__ocml_cos_f64(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
 
 // -----
 gpu.module @test_module {
-  // CHECK: llvm.func @__ocml_exp_f32(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__ocml_exp_f64(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__ocml_exp_f32(f32) -> f32
+  // CHECK: llvm.func @__ocml_exp_f64(f64) -> f64
   // CHECK-LABEL: func @gpu_exp
   func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %exp_f32 = std.exp %arg_f32 : f32
-    // CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) : (f32) -> f32
     %result32 = std.exp %exp_f32 : f32
-    // CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) : (f32) -> f32
     %result64 = std.exp %arg_f64 : f64
-    // CHECK: llvm.call @__ocml_exp_f64(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__ocml_exp_f64(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
@@ -165,16 +165,16 @@ gpu.module @test_module {
 gpu.module @test_module {
   "test.symbol_scope"() ({
     // CHECK: test.symbol_scope
-    // CHECK: llvm.func @__ocml_exp_f32(!llvm.float) -> !llvm.float
-    // CHECK: llvm.func @__ocml_exp_f64(!llvm.double) -> !llvm.double
+    // CHECK: llvm.func @__ocml_exp_f32(f32) -> f32
+    // CHECK: llvm.func @__ocml_exp_f64(f64) -> f64
     // CHECK-LABEL: func @gpu_exp
     func @gpu_exp(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
       %exp_f32 = std.exp %arg_f32 : f32
-      // CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+      // CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) : (f32) -> f32
       %result32 = std.exp %exp_f32 : f32
-      // CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+      // CHECK: llvm.call @__ocml_exp_f32(%{{.*}}) : (f32) -> f32
       %result64 = std.exp %arg_f64 : f64
-      // CHECK: llvm.call @__ocml_exp_f64(%{{.*}}) : (!llvm.double) -> !llvm.double
+      // CHECK: llvm.call @__ocml_exp_f64(%{{.*}}) : (f64) -> f64
       std.return %result32, %result64 : f32, f64
     }
     "test.finish" () : () -> ()
@@ -184,14 +184,14 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__ocml_log_f32(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__ocml_log_f64(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__ocml_log_f32(f32) -> f32
+  // CHECK: llvm.func @__ocml_log_f64(f64) -> f64
   // CHECK-LABEL: func @gpu_log
   func @gpu_log(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.log %arg_f32 : f32
-    // CHECK: llvm.call @__ocml_log_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__ocml_log_f32(%{{.*}}) : (f32) -> f32
     %result64 = std.log %arg_f64 : f64
-    // CHECK: llvm.call @__ocml_log_f64(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__ocml_log_f64(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
@@ -199,14 +199,14 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__ocml_log10_f32(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__ocml_log10_f64(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__ocml_log10_f32(f32) -> f32
+  // CHECK: llvm.func @__ocml_log10_f64(f64) -> f64
   // CHECK-LABEL: func @gpu_log10
   func @gpu_log10(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.log10 %arg_f32 : f32
-    // CHECK: llvm.call @__ocml_log10_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__ocml_log10_f32(%{{.*}}) : (f32) -> f32
     %result64 = std.log10 %arg_f64 : f64
-    // CHECK: llvm.call @__ocml_log10_f64(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__ocml_log10_f64(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
@@ -214,14 +214,14 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__ocml_log2_f32(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__ocml_log2_f64(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__ocml_log2_f32(f32) -> f32
+  // CHECK: llvm.func @__ocml_log2_f64(f64) -> f64
   // CHECK-LABEL: func @gpu_log2
   func @gpu_log2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.log2 %arg_f32 : f32
-    // CHECK: llvm.call @__ocml_log2_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__ocml_log2_f32(%{{.*}}) : (f32) -> f32
     %result64 = std.log2 %arg_f64 : f64
-    // CHECK: llvm.call @__ocml_log2_f64(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__ocml_log2_f64(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
@@ -229,19 +229,19 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__ocml_rsqrt_f32(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__ocml_rsqrt_f64(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__ocml_rsqrt_f32(f32) -> f32
+  // CHECK: llvm.func @__ocml_rsqrt_f64(f64) -> f64
   // CHECK-LABEL: func @gpu_rsqrt
   func @gpu_rsqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64)
       -> (f16, f32, f64) {
     %result16 = std.rsqrt %arg_f16 : f16
-    // CHECK: llvm.fpext %{{.*}} : !llvm.half to !llvm.float
-    // CHECK-NEXT: llvm.call @__ocml_rsqrt_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
-    // CHECK-NEXT: llvm.fptrunc %{{.*}} : !llvm.float to !llvm.half
+    // CHECK: llvm.fpext %{{.*}} : f16 to f32
+    // CHECK-NEXT: llvm.call @__ocml_rsqrt_f32(%{{.*}}) : (f32) -> f32
+    // CHECK-NEXT: llvm.fptrunc %{{.*}} : f32 to f16
     %result32 = std.rsqrt %arg_f32 : f32
-    // CHECK: llvm.call @__ocml_rsqrt_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__ocml_rsqrt_f32(%{{.*}}) : (f32) -> f32
     %result64 = std.rsqrt %arg_f64 : f64
-    // CHECK: llvm.call @__ocml_rsqrt_f64(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__ocml_rsqrt_f64(%{{.*}}) : (f64) -> f64
     std.return %result16, %result32, %result64 : f16, f32, f64
   }
 }
@@ -249,19 +249,19 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__ocml_sqrt_f32(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__ocml_sqrt_f64(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__ocml_sqrt_f32(f32) -> f32
+  // CHECK: llvm.func @__ocml_sqrt_f64(f64) -> f64
   // CHECK-LABEL: func @gpu_sqrt
   func @gpu_sqrt(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64)
       -> (f16, f32, f64) {
     %result16 = std.sqrt %arg_f16 : f16
-    // CHECK: llvm.fpext %{{.*}} : !llvm.half to !llvm.float
-    // CHECK-NEXT: llvm.call @__ocml_sqrt_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
-    // CHECK-NEXT: llvm.fptrunc %{{.*}} : !llvm.float to !llvm.half
+    // CHECK: llvm.fpext %{{.*}} : f16 to f32
+    // CHECK-NEXT: llvm.call @__ocml_sqrt_f32(%{{.*}}) : (f32) -> f32
+    // CHECK-NEXT: llvm.fptrunc %{{.*}} : f32 to f16
     %result32 = std.sqrt %arg_f32 : f32
-    // CHECK: llvm.call @__ocml_sqrt_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__ocml_sqrt_f32(%{{.*}}) : (f32) -> f32
     %result64 = std.sqrt %arg_f64 : f64
-    // CHECK: llvm.call @__ocml_sqrt_f64(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__ocml_sqrt_f64(%{{.*}}) : (f64) -> f64
     std.return %result16, %result32, %result64 : f16, f32, f64
   }
 }
@@ -269,14 +269,14 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__ocml_tanh_f32(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__ocml_tanh_f64(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__ocml_tanh_f32(f32) -> f32
+  // CHECK: llvm.func @__ocml_tanh_f64(f64) -> f64
   // CHECK-LABEL: func @gpu_tanh
   func @gpu_tanh(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.tanh %arg_f32 : f32
-    // CHECK: llvm.call @__ocml_tanh_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__ocml_tanh_f32(%{{.*}}) : (f32) -> f32
     %result64 = std.tanh %arg_f64 : f64
-    // CHECK: llvm.call @__ocml_tanh_f64(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__ocml_tanh_f64(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
@@ -284,14 +284,14 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__ocml_atan_f32(!llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__ocml_atan_f64(!llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__ocml_atan_f32(f32) -> f32
+  // CHECK: llvm.func @__ocml_atan_f64(f64) -> f64
   // CHECK-LABEL: func @gpu_atan
   func @gpu_atan(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.atan %arg_f32 : f32
-    // CHECK: llvm.call @__ocml_atan_f32(%{{.*}}) : (!llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__ocml_atan_f32(%{{.*}}) : (f32) -> f32
     %result64 = std.atan %arg_f64 : f64
-    // CHECK: llvm.call @__ocml_atan_f64(%{{.*}}) : (!llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__ocml_atan_f64(%{{.*}}) : (f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
@@ -299,14 +299,14 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__ocml_atan2_f32(!llvm.float, !llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__ocml_atan2_f64(!llvm.double, !llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__ocml_atan2_f32(f32, f32) -> f32
+  // CHECK: llvm.func @__ocml_atan2_f64(f64, f64) -> f64
   // CHECK-LABEL: func @gpu_atan2
   func @gpu_atan2(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.atan2 %arg_f32, %arg_f32 : f32
-    // CHECK: llvm.call @__ocml_atan2_f32(%{{.*}}) : (!llvm.float, !llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__ocml_atan2_f32(%{{.*}}) : (f32, f32) -> f32
     %result64 = std.atan2 %arg_f64, %arg_f64 : f64
-    // CHECK: llvm.call @__ocml_atan2_f64(%{{.*}}) : (!llvm.double, !llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__ocml_atan2_f64(%{{.*}}) : (f64, f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }
@@ -314,14 +314,14 @@ gpu.module @test_module {
 // -----
 
 gpu.module @test_module {
-  // CHECK: llvm.func @__ocml_pow_f32(!llvm.float, !llvm.float) -> !llvm.float
-  // CHECK: llvm.func @__ocml_pow_f64(!llvm.double, !llvm.double) -> !llvm.double
+  // CHECK: llvm.func @__ocml_pow_f32(f32, f32) -> f32
+  // CHECK: llvm.func @__ocml_pow_f64(f64, f64) -> f64
   // CHECK-LABEL: func @gpu_pow
   func @gpu_pow(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
     %result32 = std.powf %arg_f32, %arg_f32 : f32
-    // CHECK: llvm.call @__ocml_pow_f32(%{{.*}}, %{{.*}}) : (!llvm.float, !llvm.float) -> !llvm.float
+    // CHECK: llvm.call @__ocml_pow_f32(%{{.*}}, %{{.*}}) : (f32, f32) -> f32
     %result64 = std.powf %arg_f64, %arg_f64 : f64
-    // CHECK: llvm.call @__ocml_pow_f64(%{{.*}}, %{{.*}}) : (!llvm.double, !llvm.double) -> !llvm.double
+    // CHECK: llvm.call @__ocml_pow_f64(%{{.*}}, %{{.*}}) : (f64, f64) -> f64
     std.return %result32, %result64 : f32, f64
   }
 }

diff  --git a/mlir/test/Conversion/GPUToROCm/lower-rocdl-kernel-to-hsaco.mlir b/mlir/test/Conversion/GPUToROCm/lower-rocdl-kernel-to-hsaco.mlir
index d88f842a2186..3d7deb906e77 100644
--- a/mlir/test/Conversion/GPUToROCm/lower-rocdl-kernel-to-hsaco.mlir
+++ b/mlir/test/Conversion/GPUToROCm/lower-rocdl-kernel-to-hsaco.mlir
@@ -2,7 +2,7 @@
 
 // CHECK: attributes {rocdl.hsaco = "HSACO"}
 gpu.module @foo {
-  llvm.func @kernel(%arg0 : !llvm.float, %arg1 : !llvm.ptr<float>)
+  llvm.func @kernel(%arg0 : f32, %arg1 : !llvm.ptr<f32>)
     // CHECK: attributes  {gpu.kernel}
     attributes  { gpu.kernel } {
     llvm.return

diff  --git a/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir b/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir
index e66770265938..38796013b6e9 100644
--- a/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir
+++ b/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir
@@ -6,7 +6,7 @@
 // CHECK: %[[addressof_SPIRV_BIN:.*]] = llvm.mlir.addressof @SPIRV_BIN
 // CHECK: %[[SPIRV_BIN_ptr:.*]] = llvm.getelementptr %[[addressof_SPIRV_BIN]]
 // CHECK: %[[SPIRV_BIN_size:.*]] = llvm.mlir.constant
-// CHECK: llvm.call @bindMemRef1DFloat(%[[Vulkan_Runtime_ptr]], %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i8>, i32, i32, !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>>) -> !llvm.void
+// CHECK: llvm.call @bindMemRef1DFloat(%[[Vulkan_Runtime_ptr]], %{{.*}}, %{{.*}}, %{{.*}}) : (!llvm.ptr<i8>, i32, i32, !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>>) -> !llvm.void
 // CHECK: llvm.call @setBinaryShader(%[[Vulkan_Runtime_ptr]], %[[SPIRV_BIN_ptr]], %[[SPIRV_BIN_size]]) : (!llvm.ptr<i8>, !llvm.ptr<i8>, i32) -> !llvm.void
 // CHECK: %[[addressof_entry_point:.*]] = llvm.mlir.addressof @kernel_spv_entry_point_name
 // CHECK: %[[entry_point_ptr:.*]] = llvm.getelementptr %[[addressof_entry_point]]
@@ -21,43 +21,43 @@ module attributes {gpu.container_module} {
   llvm.func @malloc(i64) -> !llvm.ptr<i8>
   llvm.func @foo() {
     %0 = llvm.mlir.constant(12 : index) : i64
-    %1 = llvm.mlir.null : !llvm.ptr<float>
+    %1 = llvm.mlir.null : !llvm.ptr<f32>
     %2 = llvm.mlir.constant(1 : index) : i64
-    %3 = llvm.getelementptr %1[%2] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-    %4 = llvm.ptrtoint %3 : !llvm.ptr<float> to i64
+    %3 = llvm.getelementptr %1[%2] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+    %4 = llvm.ptrtoint %3 : !llvm.ptr<f32> to i64
     %5 = llvm.mul %0, %4 : i64
     %6 = llvm.call @malloc(%5) : (i64) -> !llvm.ptr<i8>
-    %7 = llvm.bitcast %6 : !llvm.ptr<i8> to !llvm.ptr<float>
-    %8 = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-    %9 = llvm.insertvalue %7, %8[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-    %10 = llvm.insertvalue %7, %9[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
+    %7 = llvm.bitcast %6 : !llvm.ptr<i8> to !llvm.ptr<f32>
+    %8 = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+    %9 = llvm.insertvalue %7, %8[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+    %10 = llvm.insertvalue %7, %9[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
     %11 = llvm.mlir.constant(0 : index) : i64
-    %12 = llvm.insertvalue %11, %10[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
+    %12 = llvm.insertvalue %11, %10[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
     %13 = llvm.mlir.constant(1 : index) : i64
-    %14 = llvm.insertvalue %0, %12[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-    %15 = llvm.insertvalue %13, %14[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
+    %14 = llvm.insertvalue %0, %12[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+    %15 = llvm.insertvalue %13, %14[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
     %16 = llvm.mlir.constant(1 : index) : i64
-    %17 = llvm.extractvalue %15[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-    %18 = llvm.extractvalue %15[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-    %19 = llvm.extractvalue %15[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-    %20 = llvm.extractvalue %15[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-    %21 = llvm.extractvalue %15[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
+    %17 = llvm.extractvalue %15[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+    %18 = llvm.extractvalue %15[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+    %19 = llvm.extractvalue %15[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+    %20 = llvm.extractvalue %15[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+    %21 = llvm.extractvalue %15[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
     llvm.call @vulkanLaunch(%16, %16, %16, %17, %18, %19, %20, %21) {spirv_blob = "\03\02#\07\00", spirv_entry_point = "kernel"}
-    : (i64, i64, i64, !llvm.ptr<float>, !llvm.ptr<float>, i64, i64, i64) -> ()
+    : (i64, i64, i64, !llvm.ptr<f32>, !llvm.ptr<f32>, i64, i64, i64) -> ()
     llvm.return
   }
-  llvm.func @vulkanLaunch(%arg0: i64, %arg1: i64, %arg2: i64, %arg6: !llvm.ptr<float>, %arg7: !llvm.ptr<float>, %arg8: i64, %arg9: i64, %arg10: i64) {
-    %0 = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-    %1 = llvm.insertvalue %arg6, %0[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-    %2 = llvm.insertvalue %arg7, %1[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-    %3 = llvm.insertvalue %arg8, %2[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-    %4 = llvm.insertvalue %arg9, %3[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-    %5 = llvm.insertvalue %arg10, %4[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
+  llvm.func @vulkanLaunch(%arg0: i64, %arg1: i64, %arg2: i64, %arg6: !llvm.ptr<f32>, %arg7: !llvm.ptr<f32>, %arg8: i64, %arg9: i64, %arg10: i64) {
+    %0 = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+    %1 = llvm.insertvalue %arg6, %0[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+    %2 = llvm.insertvalue %arg7, %1[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+    %3 = llvm.insertvalue %arg8, %2[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+    %4 = llvm.insertvalue %arg9, %3[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+    %5 = llvm.insertvalue %arg10, %4[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
     %6 = llvm.mlir.constant(1 : index) : i64
-    %7 = llvm.alloca %6 x !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)> : (i64) -> !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>>
-    llvm.store %5, %7 : !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>>
-    llvm.call @_mlir_ciface_vulkanLaunch(%arg0, %arg1, %arg2, %7) : (i64, i64, i64, !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>>) -> ()
+    %7 = llvm.alloca %6 x !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)> : (i64) -> !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>>
+    llvm.store %5, %7 : !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>>
+    llvm.call @_mlir_ciface_vulkanLaunch(%arg0, %arg1, %arg2, %7) : (i64, i64, i64, !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>>) -> ()
     llvm.return
   }
-  llvm.func @_mlir_ciface_vulkanLaunch(i64, i64, i64, !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>>)
+  llvm.func @_mlir_ciface_vulkanLaunch(i64, i64, i64, !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>>)
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir
index f88e901443df..0e8dfab78855 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir
@@ -60,14 +60,14 @@ spv.func @imul_vector(%arg0: vector<3xi32>, %arg1: vector<3xi32>) "None" {
 
 // CHECK-LABEL: @fadd_scalar
 spv.func @fadd_scalar(%arg0: f16, %arg1: f16) "None" {
-  // CHECK: llvm.fadd %{{.*}}, %{{.*}} : !llvm.half
+  // CHECK: llvm.fadd %{{.*}}, %{{.*}} : f16
   %0 = spv.FAdd %arg0, %arg1 : f16
   spv.Return
 }
 
 // CHECK-LABEL: @fadd_vector
 spv.func @fadd_vector(%arg0: vector<4xf32>, %arg1: vector<4xf32>) "None" {
-  // CHECK: llvm.fadd %{{.*}}, %{{.*}} : !llvm.vec<4 x float>
+  // CHECK: llvm.fadd %{{.*}}, %{{.*}} : !llvm.vec<4 x f32>
   %0 = spv.FAdd %arg0, %arg1 : vector<4xf32>
   spv.Return
 }
@@ -78,14 +78,14 @@ spv.func @fadd_vector(%arg0: vector<4xf32>, %arg1: vector<4xf32>) "None" {
 
 // CHECK-LABEL: @fsub_scalar
 spv.func @fsub_scalar(%arg0: f32, %arg1: f32) "None" {
-  // CHECK: llvm.fsub %{{.*}}, %{{.*}} : !llvm.float
+  // CHECK: llvm.fsub %{{.*}}, %{{.*}} : f32
   %0 = spv.FSub %arg0, %arg1 : f32
   spv.Return
 }
 
 // CHECK-LABEL: @fsub_vector
 spv.func @fsub_vector(%arg0: vector<2xf32>, %arg1: vector<2xf32>) "None" {
-  // CHECK: llvm.fsub %{{.*}}, %{{.*}} : !llvm.vec<2 x float>
+  // CHECK: llvm.fsub %{{.*}}, %{{.*}} : !llvm.vec<2 x f32>
   %0 = spv.FSub %arg0, %arg1 : vector<2xf32>
   spv.Return
 }
@@ -96,14 +96,14 @@ spv.func @fsub_vector(%arg0: vector<2xf32>, %arg1: vector<2xf32>) "None" {
 
 // CHECK-LABEL: @fdiv_scalar
 spv.func @fdiv_scalar(%arg0: f32, %arg1: f32) "None" {
-  // CHECK: llvm.fdiv %{{.*}}, %{{.*}} : !llvm.float
+  // CHECK: llvm.fdiv %{{.*}}, %{{.*}} : f32
   %0 = spv.FDiv %arg0, %arg1 : f32
   spv.Return
 }
 
 // CHECK-LABEL: @fdiv_vector
 spv.func @fdiv_vector(%arg0: vector<3xf64>, %arg1: vector<3xf64>) "None" {
-  // CHECK: llvm.fdiv %{{.*}}, %{{.*}} : !llvm.vec<3 x double>
+  // CHECK: llvm.fdiv %{{.*}}, %{{.*}} : !llvm.vec<3 x f64>
   %0 = spv.FDiv %arg0, %arg1 : vector<3xf64>
   spv.Return
 }
@@ -114,14 +114,14 @@ spv.func @fdiv_vector(%arg0: vector<3xf64>, %arg1: vector<3xf64>) "None" {
 
 // CHECK-LABEL: @fmul_scalar
 spv.func @fmul_scalar(%arg0: f32, %arg1: f32) "None" {
-  // CHECK: llvm.fmul %{{.*}}, %{{.*}} : !llvm.float
+  // CHECK: llvm.fmul %{{.*}}, %{{.*}} : f32
   %0 = spv.FMul %arg0, %arg1 : f32
   spv.Return
 }
 
 // CHECK-LABEL: @fmul_vector
 spv.func @fmul_vector(%arg0: vector<2xf32>, %arg1: vector<2xf32>) "None" {
-  // CHECK: llvm.fmul %{{.*}}, %{{.*}} : !llvm.vec<2 x float>
+  // CHECK: llvm.fmul %{{.*}}, %{{.*}} : !llvm.vec<2 x f32>
   %0 = spv.FMul %arg0, %arg1 : vector<2xf32>
   spv.Return
 }
@@ -132,14 +132,14 @@ spv.func @fmul_vector(%arg0: vector<2xf32>, %arg1: vector<2xf32>) "None" {
 
 // CHECK-LABEL: @frem_scalar
 spv.func @frem_scalar(%arg0: f32, %arg1: f32) "None" {
-  // CHECK: llvm.frem %{{.*}}, %{{.*}} : !llvm.float
+  // CHECK: llvm.frem %{{.*}}, %{{.*}} : f32
   %0 = spv.FRem %arg0, %arg1 : f32
   spv.Return
 }
 
 // CHECK-LABEL: @frem_vector
 spv.func @frem_vector(%arg0: vector<3xf64>, %arg1: vector<3xf64>) "None" {
-  // CHECK: llvm.frem %{{.*}}, %{{.*}} : !llvm.vec<3 x double>
+  // CHECK: llvm.frem %{{.*}}, %{{.*}} : !llvm.vec<3 x f64>
   %0 = spv.FRem %arg0, %arg1 : vector<3xf64>
   spv.Return
 }
@@ -150,14 +150,14 @@ spv.func @frem_vector(%arg0: vector<3xf64>, %arg1: vector<3xf64>) "None" {
 
 // CHECK-LABEL: @fneg_scalar
 spv.func @fneg_scalar(%arg: f64) "None" {
-  // CHECK: llvm.fneg %{{.*}} : !llvm.double
+  // CHECK: llvm.fneg %{{.*}} : f64
   %0 = spv.FNegate %arg : f64
   spv.Return
 }
 
 // CHECK-LABEL: @fneg_vector
 spv.func @fneg_vector(%arg: vector<2xf32>) "None" {
-  // CHECK: llvm.fneg %{{.*}} : !llvm.vec<2 x float>
+  // CHECK: llvm.fneg %{{.*}} : !llvm.vec<2 x f32>
   %0 = spv.FNegate %arg : vector<2xf32>
   spv.Return
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir
index f303c3b8bc9e..8f67a5fcab70 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir
@@ -6,42 +6,42 @@
 
 // CHECK-LABEL: @bitcast_float_to_integer_scalar
 spv.func @bitcast_float_to_integer_scalar(%arg0 : f32) "None" {
-  // CHECK: llvm.bitcast {{.*}} : !llvm.float to i32
+  // CHECK: llvm.bitcast {{.*}} : f32 to i32
   %0 = spv.Bitcast %arg0: f32 to i32
   spv.Return
 }
 
 // CHECK-LABEL: @bitcast_float_to_integer_vector
 spv.func @bitcast_float_to_integer_vector(%arg0 : vector<3xf32>) "None" {
-  // CHECK: {{.*}} = llvm.bitcast {{.*}} : !llvm.vec<3 x float> to !llvm.vec<3 x i32>
+  // CHECK: {{.*}} = llvm.bitcast {{.*}} : !llvm.vec<3 x f32> to !llvm.vec<3 x i32>
   %0 = spv.Bitcast %arg0: vector<3xf32> to vector<3xi32>
   spv.Return
 }
 
 // CHECK-LABEL: @bitcast_vector_to_scalar
 spv.func @bitcast_vector_to_scalar(%arg0 : vector<2xf32>) "None" {
-  // CHECK: {{.*}} = llvm.bitcast {{.*}} : !llvm.vec<2 x float> to i64
+  // CHECK: {{.*}} = llvm.bitcast {{.*}} : !llvm.vec<2 x f32> to i64
   %0 = spv.Bitcast %arg0: vector<2xf32> to i64
   spv.Return
 }
 
 // CHECK-LABEL: @bitcast_scalar_to_vector
 spv.func @bitcast_scalar_to_vector(%arg0 : f64) "None" {
-  // CHECK: {{.*}} = llvm.bitcast {{.*}} : !llvm.double to !llvm.vec<2 x i32>
+  // CHECK: {{.*}} = llvm.bitcast {{.*}} : f64 to !llvm.vec<2 x i32>
   %0 = spv.Bitcast %arg0: f64 to vector<2xi32>
   spv.Return
 }
 
 // CHECK-LABEL: @bitcast_vector_to_vector
 spv.func @bitcast_vector_to_vector(%arg0 : vector<4xf32>) "None" {
-  // CHECK: {{.*}} = llvm.bitcast {{.*}} : !llvm.vec<4 x float> to !llvm.vec<2 x i64>
+  // CHECK: {{.*}} = llvm.bitcast {{.*}} : !llvm.vec<4 x f32> to !llvm.vec<2 x i64>
   %0 = spv.Bitcast %arg0: vector<4xf32> to vector<2xi64>
   spv.Return
 }
 
 // CHECK-LABEL: @bitcast_pointer
 spv.func @bitcast_pointer(%arg0: !spv.ptr<f32, Function>) "None" {
-  // CHECK: llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<i32>
+  // CHECK: llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<i32>
   %0 = spv.Bitcast %arg0 : !spv.ptr<f32, Function> to !spv.ptr<i32, Function>
   spv.Return
 }
@@ -52,14 +52,14 @@ spv.func @bitcast_pointer(%arg0: !spv.ptr<f32, Function>) "None" {
 
 // CHECK-LABEL: @convert_float_to_signed_scalar
 spv.func @convert_float_to_signed_scalar(%arg0: f32) "None" {
-  // CHECK: llvm.fptosi %{{.*}} : !llvm.float to i32
+  // CHECK: llvm.fptosi %{{.*}} : f32 to i32
   %0 = spv.ConvertFToS %arg0: f32 to i32
   spv.Return
 }
 
 // CHECK-LABEL: @convert_float_to_signed_vector
 spv.func @convert_float_to_signed_vector(%arg0: vector<2xf32>) "None" {
-  // CHECK: llvm.fptosi %{{.*}} : !llvm.vec<2 x float> to !llvm.vec<2 x i32>
+  // CHECK: llvm.fptosi %{{.*}} : !llvm.vec<2 x f32> to !llvm.vec<2 x i32>
     %0 = spv.ConvertFToS %arg0: vector<2xf32> to vector<2xi32>
   spv.Return
 }
@@ -70,14 +70,14 @@ spv.func @convert_float_to_signed_vector(%arg0: vector<2xf32>) "None" {
 
 // CHECK-LABEL: @convert_float_to_unsigned_scalar
 spv.func @convert_float_to_unsigned_scalar(%arg0: f32) "None" {
-  // CHECK: llvm.fptoui %{{.*}} : !llvm.float to i32
+  // CHECK: llvm.fptoui %{{.*}} : f32 to i32
   %0 = spv.ConvertFToU %arg0: f32 to i32
   spv.Return
 }
 
 // CHECK-LABEL: @convert_float_to_unsigned_vector
 spv.func @convert_float_to_unsigned_vector(%arg0: vector<2xf32>) "None" {
-  // CHECK: llvm.fptoui %{{.*}} : !llvm.vec<2 x float> to !llvm.vec<2 x i32>
+  // CHECK: llvm.fptoui %{{.*}} : !llvm.vec<2 x f32> to !llvm.vec<2 x i32>
     %0 = spv.ConvertFToU %arg0: vector<2xf32> to vector<2xi32>
   spv.Return
 }
@@ -88,14 +88,14 @@ spv.func @convert_float_to_unsigned_vector(%arg0: vector<2xf32>) "None" {
 
 // CHECK-LABEL: @convert_signed_to_float_scalar
 spv.func @convert_signed_to_float_scalar(%arg0: i32) "None" {
-  // CHECK: llvm.sitofp %{{.*}} : i32 to !llvm.float
+  // CHECK: llvm.sitofp %{{.*}} : i32 to f32
   %0 = spv.ConvertSToF %arg0: i32 to f32
   spv.Return
 }
 
 // CHECK-LABEL: @convert_signed_to_float_vector
 spv.func @convert_signed_to_float_vector(%arg0: vector<3xi32>) "None" {
-  // CHECK: llvm.sitofp %{{.*}} : !llvm.vec<3 x i32> to !llvm.vec<3 x float>
+  // CHECK: llvm.sitofp %{{.*}} : !llvm.vec<3 x i32> to !llvm.vec<3 x f32>
     %0 = spv.ConvertSToF %arg0: vector<3xi32> to vector<3xf32>
   spv.Return
 }
@@ -106,14 +106,14 @@ spv.func @convert_signed_to_float_vector(%arg0: vector<3xi32>) "None" {
 
 // CHECK-LABEL: @convert_unsigned_to_float_scalar
 spv.func @convert_unsigned_to_float_scalar(%arg0: i32) "None" {
-  // CHECK: llvm.uitofp %{{.*}} : i32 to !llvm.float
+  // CHECK: llvm.uitofp %{{.*}} : i32 to f32
   %0 = spv.ConvertUToF %arg0: i32 to f32
   spv.Return
 }
 
 // CHECK-LABEL: @convert_unsigned_to_float_vector
 spv.func @convert_unsigned_to_float_vector(%arg0: vector<3xi32>) "None" {
-  // CHECK: llvm.uitofp %{{.*}} : !llvm.vec<3 x i32> to !llvm.vec<3 x float>
+  // CHECK: llvm.uitofp %{{.*}} : !llvm.vec<3 x i32> to !llvm.vec<3 x f32>
     %0 = spv.ConvertUToF %arg0: vector<3xi32> to vector<3xf32>
   spv.Return
 }
@@ -124,20 +124,20 @@ spv.func @convert_unsigned_to_float_vector(%arg0: vector<3xi32>) "None" {
 
 // CHECK-LABEL: @fconvert_scalar
 spv.func @fconvert_scalar(%arg0: f32, %arg1: f64) "None" {
-  // CHECK: llvm.fpext %{{.*}} : !llvm.float to !llvm.double
+  // CHECK: llvm.fpext %{{.*}} : f32 to f64
   %0 = spv.FConvert %arg0: f32 to f64
 
-  // CHECK: llvm.fptrunc %{{.*}} : !llvm.double to !llvm.float
+  // CHECK: llvm.fptrunc %{{.*}} : f64 to f32
   %1 = spv.FConvert %arg1: f64 to f32
   spv.Return
 }
 
 // CHECK-LABEL: @fconvert_vector
 spv.func @fconvert_vector(%arg0: vector<2xf32>, %arg1: vector<2xf64>) "None" {
-  // CHECK: llvm.fpext %{{.*}} : !llvm.vec<2 x float> to !llvm.vec<2 x double>
+  // CHECK: llvm.fpext %{{.*}} : !llvm.vec<2 x f32> to !llvm.vec<2 x f64>
   %0 = spv.FConvert %arg0: vector<2xf32> to vector<2xf64>
 
-  // CHECK: llvm.fptrunc %{{.*}} : !llvm.vec<2 x double> to !llvm.vec<2 x float>
+  // CHECK: llvm.fptrunc %{{.*}} : !llvm.vec<2 x f64> to !llvm.vec<2 x f32>
   %1 = spv.FConvert %arg1: vector<2xf64> to vector<2xf32>
   spv.Return
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir
index ef50c05346ed..632136c5ede0 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir
@@ -186,14 +186,14 @@ spv.func @u_less_than_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None"
 
 // CHECK-LABEL: @f_ord_equal_scalar
 spv.func @f_ord_equal_scalar(%arg0: f32, %arg1: f32) "None" {
-  // CHECK: llvm.fcmp "oeq" %{{.*}}, %{{.*}} : !llvm.float
+  // CHECK: llvm.fcmp "oeq" %{{.*}}, %{{.*}} : f32
   %0 = spv.FOrdEqual %arg0, %arg1 : f32
   spv.Return
 }
 
 // CHECK-LABEL: @f_ord_equal_vector
 spv.func @f_ord_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) "None" {
-  // CHECK: llvm.fcmp "oeq" %{{.*}}, %{{.*}} : !llvm.vec<4 x double>
+  // CHECK: llvm.fcmp "oeq" %{{.*}}, %{{.*}} : !llvm.vec<4 x f64>
   %0 = spv.FOrdEqual %arg0, %arg1 : vector<4xf64>
   spv.Return
 }
@@ -204,14 +204,14 @@ spv.func @f_ord_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) "None"
 
 // CHECK-LABEL: @f_ord_greater_than_equal_scalar
 spv.func @f_ord_greater_than_equal_scalar(%arg0: f64, %arg1: f64) "None" {
-  // CHECK: llvm.fcmp "oge" %{{.*}}, %{{.*}} : !llvm.double
+  // CHECK: llvm.fcmp "oge" %{{.*}}, %{{.*}} : f64
   %0 = spv.FOrdGreaterThanEqual %arg0, %arg1 : f64
   spv.Return
 }
 
 // CHECK-LABEL: @f_ord_greater_than_equal_vector
 spv.func @f_ord_greater_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
-  // CHECK: llvm.fcmp "oge" %{{.*}}, %{{.*}} : !llvm.vec<2 x double>
+  // CHECK: llvm.fcmp "oge" %{{.*}}, %{{.*}} : !llvm.vec<2 x f64>
   %0 = spv.FOrdGreaterThanEqual %arg0, %arg1 : vector<2xf64>
   spv.Return
 }
@@ -222,14 +222,14 @@ spv.func @f_ord_greater_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2x
 
 // CHECK-LABEL: @f_ord_greater_than_scalar
 spv.func @f_ord_greater_than_scalar(%arg0: f64, %arg1: f64) "None" {
-  // CHECK: llvm.fcmp "ogt" %{{.*}}, %{{.*}} : !llvm.double
+  // CHECK: llvm.fcmp "ogt" %{{.*}}, %{{.*}} : f64
   %0 = spv.FOrdGreaterThan %arg0, %arg1 : f64
   spv.Return
 }
 
 // CHECK-LABEL: @f_ord_greater_than_vector
 spv.func @f_ord_greater_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
-  // CHECK: llvm.fcmp "ogt" %{{.*}}, %{{.*}} : !llvm.vec<2 x double>
+  // CHECK: llvm.fcmp "ogt" %{{.*}}, %{{.*}} : !llvm.vec<2 x f64>
   %0 = spv.FOrdGreaterThan %arg0, %arg1 : vector<2xf64>
   spv.Return
 }
@@ -240,14 +240,14 @@ spv.func @f_ord_greater_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>)
 
 // CHECK-LABEL: @f_ord_less_than_scalar
 spv.func @f_ord_less_than_scalar(%arg0: f64, %arg1: f64) "None" {
-  // CHECK: llvm.fcmp "olt" %{{.*}}, %{{.*}} : !llvm.double
+  // CHECK: llvm.fcmp "olt" %{{.*}}, %{{.*}} : f64
   %0 = spv.FOrdLessThan %arg0, %arg1 : f64
   spv.Return
 }
 
 // CHECK-LABEL: @f_ord_less_than_vector
 spv.func @f_ord_less_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
-  // CHECK: llvm.fcmp "olt" %{{.*}}, %{{.*}} : !llvm.vec<2 x double>
+  // CHECK: llvm.fcmp "olt" %{{.*}}, %{{.*}} : !llvm.vec<2 x f64>
   %0 = spv.FOrdLessThan %arg0, %arg1 : vector<2xf64>
   spv.Return
 }
@@ -258,14 +258,14 @@ spv.func @f_ord_less_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "No
 
 // CHECK-LABEL: @f_ord_less_than_equal_scalar
 spv.func @f_ord_less_than_equal_scalar(%arg0: f64, %arg1: f64) "None" {
-  // CHECK: llvm.fcmp "ole" %{{.*}}, %{{.*}} : !llvm.double
+  // CHECK: llvm.fcmp "ole" %{{.*}}, %{{.*}} : f64
   %0 = spv.FOrdLessThanEqual %arg0, %arg1 : f64
   spv.Return
 }
 
 // CHECK-LABEL: @f_ord_less_than_equal_vector
 spv.func @f_ord_less_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
-  // CHECK: llvm.fcmp "ole" %{{.*}}, %{{.*}} : !llvm.vec<2 x double>
+  // CHECK: llvm.fcmp "ole" %{{.*}}, %{{.*}} : !llvm.vec<2 x f64>
   %0 = spv.FOrdLessThanEqual %arg0, %arg1 : vector<2xf64>
   spv.Return
 }
@@ -276,14 +276,14 @@ spv.func @f_ord_less_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64
 
 // CHECK-LABEL: @f_ord_not_equal_scalar
 spv.func @f_ord_not_equal_scalar(%arg0: f32, %arg1: f32) "None" {
-  // CHECK: llvm.fcmp "one" %{{.*}}, %{{.*}} : !llvm.float
+  // CHECK: llvm.fcmp "one" %{{.*}}, %{{.*}} : f32
   %0 = spv.FOrdNotEqual %arg0, %arg1 : f32
   spv.Return
 }
 
 // CHECK-LABEL: @f_ord_not_equal_vector
 spv.func @f_ord_not_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) "None" {
-  // CHECK: llvm.fcmp "one" %{{.*}}, %{{.*}} : !llvm.vec<4 x double>
+  // CHECK: llvm.fcmp "one" %{{.*}}, %{{.*}} : !llvm.vec<4 x f64>
   %0 = spv.FOrdNotEqual %arg0, %arg1 : vector<4xf64>
   spv.Return
 }
@@ -294,14 +294,14 @@ spv.func @f_ord_not_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) "No
 
 // CHECK-LABEL: @f_unord_equal_scalar
 spv.func @f_unord_equal_scalar(%arg0: f32, %arg1: f32) "None" {
-  // CHECK: llvm.fcmp "ueq" %{{.*}}, %{{.*}} : !llvm.float
+  // CHECK: llvm.fcmp "ueq" %{{.*}}, %{{.*}} : f32
   %0 = spv.FUnordEqual %arg0, %arg1 : f32
   spv.Return
 }
 
 // CHECK-LABEL: @f_unord_equal_vector
 spv.func @f_unord_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) "None" {
-  // CHECK: llvm.fcmp "ueq" %{{.*}}, %{{.*}} : !llvm.vec<4 x double>
+  // CHECK: llvm.fcmp "ueq" %{{.*}}, %{{.*}} : !llvm.vec<4 x f64>
   %0 = spv.FUnordEqual %arg0, %arg1 : vector<4xf64>
   spv.Return
 }
@@ -312,14 +312,14 @@ spv.func @f_unord_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) "None
 
 // CHECK-LABEL: @f_unord_greater_than_equal_scalar
 spv.func @f_unord_greater_than_equal_scalar(%arg0: f64, %arg1: f64) "None" {
-  // CHECK: llvm.fcmp "uge" %{{.*}}, %{{.*}} : !llvm.double
+  // CHECK: llvm.fcmp "uge" %{{.*}}, %{{.*}} : f64
   %0 = spv.FUnordGreaterThanEqual %arg0, %arg1 : f64
   spv.Return
 }
 
 // CHECK-LABEL: @f_unord_greater_than_equal_vector
 spv.func @f_unord_greater_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
-  // CHECK: llvm.fcmp "uge" %{{.*}}, %{{.*}} : !llvm.vec<2 x double>
+  // CHECK: llvm.fcmp "uge" %{{.*}}, %{{.*}} : !llvm.vec<2 x f64>
   %0 = spv.FUnordGreaterThanEqual %arg0, %arg1 : vector<2xf64>
   spv.Return
 }
@@ -330,14 +330,14 @@ spv.func @f_unord_greater_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<
 
 // CHECK-LABEL: @f_unord_greater_than_scalar
 spv.func @f_unord_greater_than_scalar(%arg0: f64, %arg1: f64) "None" {
-  // CHECK: llvm.fcmp "ugt" %{{.*}}, %{{.*}} : !llvm.double
+  // CHECK: llvm.fcmp "ugt" %{{.*}}, %{{.*}} : f64
   %0 = spv.FUnordGreaterThan %arg0, %arg1 : f64
   spv.Return
 }
 
 // CHECK-LABEL: @f_unord_greater_than_vector
 spv.func @f_unord_greater_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
-  // CHECK: llvm.fcmp "ugt" %{{.*}}, %{{.*}} : !llvm.vec<2 x double>
+  // CHECK: llvm.fcmp "ugt" %{{.*}}, %{{.*}} : !llvm.vec<2 x f64>
   %0 = spv.FUnordGreaterThan %arg0, %arg1 : vector<2xf64>
   spv.Return
 }
@@ -348,14 +348,14 @@ spv.func @f_unord_greater_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>
 
 // CHECK-LABEL: @f_unord_less_than_scalar
 spv.func @f_unord_less_than_scalar(%arg0: f64, %arg1: f64) "None" {
-  // CHECK: llvm.fcmp "ult" %{{.*}}, %{{.*}} : !llvm.double
+  // CHECK: llvm.fcmp "ult" %{{.*}}, %{{.*}} : f64
   %0 = spv.FUnordLessThan %arg0, %arg1 : f64
   spv.Return
 }
 
 // CHECK-LABEL: @f_unord_less_than_vector
 spv.func @f_unord_less_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
-  // CHECK: llvm.fcmp "ult" %{{.*}}, %{{.*}} : !llvm.vec<2 x double>
+  // CHECK: llvm.fcmp "ult" %{{.*}}, %{{.*}} : !llvm.vec<2 x f64>
   %0 = spv.FUnordLessThan %arg0, %arg1 : vector<2xf64>
   spv.Return
 }
@@ -366,14 +366,14 @@ spv.func @f_unord_less_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "
 
 // CHECK-LABEL: @f_unord_less_than_equal_scalar
 spv.func @f_unord_less_than_equal_scalar(%arg0: f64, %arg1: f64) "None" {
-  // CHECK: llvm.fcmp "ule" %{{.*}}, %{{.*}} : !llvm.double
+  // CHECK: llvm.fcmp "ule" %{{.*}}, %{{.*}} : f64
   %0 = spv.FUnordLessThanEqual %arg0, %arg1 : f64
   spv.Return
 }
 
 // CHECK-LABEL: @f_unord_less_than_equal_vector
 spv.func @f_unord_less_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
-  // CHECK: llvm.fcmp "ule" %{{.*}}, %{{.*}} : !llvm.vec<2 x double>
+  // CHECK: llvm.fcmp "ule" %{{.*}}, %{{.*}} : !llvm.vec<2 x f64>
   %0 = spv.FUnordLessThanEqual %arg0, %arg1 : vector<2xf64>
   spv.Return
 }
@@ -384,14 +384,14 @@ spv.func @f_unord_less_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf
 
 // CHECK-LABEL: @f_unord_not_equal_scalar
 spv.func @f_unord_not_equal_scalar(%arg0: f32, %arg1: f32) "None" {
-  // CHECK: llvm.fcmp "une" %{{.*}}, %{{.*}} : !llvm.float
+  // CHECK: llvm.fcmp "une" %{{.*}}, %{{.*}} : f32
   %0 = spv.FUnordNotEqual %arg0, %arg1 : f32
   spv.Return
 }
 
 // CHECK-LABEL: @f_unord_not_equal_vector
 spv.func @f_unord_not_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) "None" {
-  // CHECK: llvm.fcmp "une" %{{.*}}, %{{.*}} : !llvm.vec<4 x double>
+  // CHECK: llvm.fcmp "une" %{{.*}}, %{{.*}} : !llvm.vec<4 x f64>
   %0 = spv.FUnordNotEqual %arg0, %arg1 : vector<4xf64>
   spv.Return
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir
index a95956ef89b3..949aa0376d14 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir
@@ -46,16 +46,16 @@ spv.func @integer_constant_vector() "None" {
 
 // CHECK-LABEL: @float_constant_scalar
 spv.func @float_constant_scalar() "None" {
-  // CHECK: llvm.mlir.constant(5.000000e+00 : f16) : !llvm.half
+  // CHECK: llvm.mlir.constant(5.000000e+00 : f16) : f16
   %0 = spv.constant 5.000000e+00 : f16
-  // CHECK: llvm.mlir.constant(5.000000e+00 : f64) : !llvm.double
+  // CHECK: llvm.mlir.constant(5.000000e+00 : f64) : f64
   %1 = spv.constant 5.000000e+00 : f64
   spv.Return
 }
 
 // CHECK-LABEL: @float_constant_vector
 spv.func @float_constant_vector() "None" {
-  // CHECK: llvm.mlir.constant(dense<[2.000000e+00, 3.000000e+00]> : vector<2xf32>) : !llvm.vec<2 x float>
+  // CHECK: llvm.mlir.constant(dense<[2.000000e+00, 3.000000e+00]> : vector<2xf32>) : !llvm.vec<2 x f32>
   %0 = spv.constant dense<[2.000000e+00, 3.000000e+00]> : vector<2xf32>
   spv.Return
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir
index 981674e7c16f..0928b0fa6c4a 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir
@@ -49,7 +49,7 @@ spv.func @const() "Const" {
   spv.Return
 }
 
-// CHECK-LABEL: llvm.func @scalar_types(%arg0: i32, %arg1: i1, %arg2: !llvm.double, %arg3: !llvm.float)
+// CHECK-LABEL: llvm.func @scalar_types(%arg0: i32, %arg1: i1, %arg2: f64, %arg3: f32)
 spv.func @scalar_types(%arg0: i32, %arg1: i1, %arg2: f64, %arg3: f32) "None" {
   spv.Return
 }
@@ -65,12 +65,12 @@ spv.func @vector_types(%arg0: vector<2xi64>, %arg1: vector<2xi64>) -> vector<2xi
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: llvm.func @function_calls
-// CHECK-SAME: %[[ARG0:.*]]: i32, %[[ARG1:.*]]: i1, %[[ARG2:.*]]: !llvm.double, %[[ARG3:.*]]: !llvm.vec<2 x i64>, %[[ARG4:.*]]: !llvm.vec<2 x float>
+// CHECK-SAME: %[[ARG0:.*]]: i32, %[[ARG1:.*]]: i1, %[[ARG2:.*]]: f64, %[[ARG3:.*]]: !llvm.vec<2 x i64>, %[[ARG4:.*]]: !llvm.vec<2 x f32>
 spv.func @function_calls(%arg0: i32, %arg1: i1, %arg2: f64, %arg3: vector<2xi64>, %arg4: vector<2xf32>) "None" {
   // CHECK: llvm.call @void_1() : () -> ()
   // CHECK: llvm.call @void_2(%[[ARG3]]) : (!llvm.vec<2 x i64>) -> ()
-  // CHECK: llvm.call @value_scalar(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (i32, i1, !llvm.double) -> i32
-  // CHECK: llvm.call @value_vector(%[[ARG3]], %[[ARG4]]) : (!llvm.vec<2 x i64>, !llvm.vec<2 x float>) -> !llvm.vec<2 x float>
+  // CHECK: llvm.call @value_scalar(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (i32, i1, f64) -> i32
+  // CHECK: llvm.call @value_vector(%[[ARG3]], %[[ARG4]]) : (!llvm.vec<2 x i64>, !llvm.vec<2 x f32>) -> !llvm.vec<2 x f32>
   spv.FunctionCall @void_1() : () -> ()
   spv.FunctionCall @void_2(%arg3) : (vector<2xi64>) -> ()
   %0 = spv.FunctionCall @value_scalar(%arg0, %arg1, %arg2) : (i32, i1, f64) -> i32

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/glsl-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/glsl-ops-to-llvm.mlir
index c69416aaeed8..62d0dec74060 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/glsl-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/glsl-ops-to-llvm.mlir
@@ -6,9 +6,9 @@
 
 // CHECK-LABEL: @ceil
 spv.func @ceil(%arg0: f32, %arg1: vector<3xf16>) "None" {
-  // CHECK: "llvm.intr.ceil"(%{{.*}}) : (!llvm.float) -> !llvm.float
+  // CHECK: "llvm.intr.ceil"(%{{.*}}) : (f32) -> f32
   %0 = spv.GLSL.Ceil %arg0 : f32
-  // CHECK: "llvm.intr.ceil"(%{{.*}}) : (!llvm.vec<3 x half>) -> !llvm.vec<3 x half>
+  // CHECK: "llvm.intr.ceil"(%{{.*}}) : (!llvm.vec<3 x f16>) -> !llvm.vec<3 x f16>
   %1 = spv.GLSL.Ceil %arg1 : vector<3xf16>
   spv.Return
 }
@@ -19,9 +19,9 @@ spv.func @ceil(%arg0: f32, %arg1: vector<3xf16>) "None" {
 
 // CHECK-LABEL: @cos
 spv.func @cos(%arg0: f32, %arg1: vector<3xf16>) "None" {
-  // CHECK: "llvm.intr.cos"(%{{.*}}) : (!llvm.float) -> !llvm.float
+  // CHECK: "llvm.intr.cos"(%{{.*}}) : (f32) -> f32
   %0 = spv.GLSL.Cos %arg0 : f32
-  // CHECK: "llvm.intr.cos"(%{{.*}}) : (!llvm.vec<3 x half>) -> !llvm.vec<3 x half>
+  // CHECK: "llvm.intr.cos"(%{{.*}}) : (!llvm.vec<3 x f16>) -> !llvm.vec<3 x f16>
   %1 = spv.GLSL.Cos %arg1 : vector<3xf16>
   spv.Return
 }
@@ -32,9 +32,9 @@ spv.func @cos(%arg0: f32, %arg1: vector<3xf16>) "None" {
 
 // CHECK-LABEL: @exp
 spv.func @exp(%arg0: f32, %arg1: vector<3xf16>) "None" {
-  // CHECK: "llvm.intr.exp"(%{{.*}}) : (!llvm.float) -> !llvm.float
+  // CHECK: "llvm.intr.exp"(%{{.*}}) : (f32) -> f32
   %0 = spv.GLSL.Exp %arg0 : f32
-  // CHECK: "llvm.intr.exp"(%{{.*}}) : (!llvm.vec<3 x half>) -> !llvm.vec<3 x half>
+  // CHECK: "llvm.intr.exp"(%{{.*}}) : (!llvm.vec<3 x f16>) -> !llvm.vec<3 x f16>
   %1 = spv.GLSL.Exp %arg1 : vector<3xf16>
   spv.Return
 }
@@ -45,9 +45,9 @@ spv.func @exp(%arg0: f32, %arg1: vector<3xf16>) "None" {
 
 // CHECK-LABEL: @fabs
 spv.func @fabs(%arg0: f32, %arg1: vector<3xf16>) "None" {
-  // CHECK: "llvm.intr.fabs"(%{{.*}}) : (!llvm.float) -> !llvm.float
+  // CHECK: "llvm.intr.fabs"(%{{.*}}) : (f32) -> f32
   %0 = spv.GLSL.FAbs %arg0 : f32
-  // CHECK: "llvm.intr.fabs"(%{{.*}}) : (!llvm.vec<3 x half>) -> !llvm.vec<3 x half>
+  // CHECK: "llvm.intr.fabs"(%{{.*}}) : (!llvm.vec<3 x f16>) -> !llvm.vec<3 x f16>
   %1 = spv.GLSL.FAbs %arg1 : vector<3xf16>
   spv.Return
 }
@@ -58,9 +58,9 @@ spv.func @fabs(%arg0: f32, %arg1: vector<3xf16>) "None" {
 
 // CHECK-LABEL: @floor
 spv.func @floor(%arg0: f32, %arg1: vector<3xf16>) "None" {
-  // CHECK: "llvm.intr.floor"(%{{.*}}) : (!llvm.float) -> !llvm.float
+  // CHECK: "llvm.intr.floor"(%{{.*}}) : (f32) -> f32
   %0 = spv.GLSL.Floor %arg0 : f32
-  // CHECK: "llvm.intr.floor"(%{{.*}}) : (!llvm.vec<3 x half>) -> !llvm.vec<3 x half>
+  // CHECK: "llvm.intr.floor"(%{{.*}}) : (!llvm.vec<3 x f16>) -> !llvm.vec<3 x f16>
   %1 = spv.GLSL.Floor %arg1 : vector<3xf16>
   spv.Return
 }
@@ -71,9 +71,9 @@ spv.func @floor(%arg0: f32, %arg1: vector<3xf16>) "None" {
 
 // CHECK-LABEL: @fmax
 spv.func @fmax(%arg0: f32, %arg1: vector<3xf16>) "None" {
-  // CHECK: "llvm.intr.maxnum"(%{{.*}}, %{{.*}}) : (!llvm.float, !llvm.float) -> !llvm.float
+  // CHECK: "llvm.intr.maxnum"(%{{.*}}, %{{.*}}) : (f32, f32) -> f32
   %0 = spv.GLSL.FMax %arg0, %arg0 : f32
-  // CHECK: "llvm.intr.maxnum"(%{{.*}}, %{{.*}}) : (!llvm.vec<3 x half>, !llvm.vec<3 x half>) -> !llvm.vec<3 x half>
+  // CHECK: "llvm.intr.maxnum"(%{{.*}}, %{{.*}}) : (!llvm.vec<3 x f16>, !llvm.vec<3 x f16>) -> !llvm.vec<3 x f16>
   %1 = spv.GLSL.FMax %arg1, %arg1 : vector<3xf16>
   spv.Return
 }
@@ -84,9 +84,9 @@ spv.func @fmax(%arg0: f32, %arg1: vector<3xf16>) "None" {
 
 // CHECK-LABEL: @fmin
 spv.func @fmin(%arg0: f32, %arg1: vector<3xf16>) "None" {
-  // CHECK: "llvm.intr.minnum"(%{{.*}}, %{{.*}}) : (!llvm.float, !llvm.float) -> !llvm.float
+  // CHECK: "llvm.intr.minnum"(%{{.*}}, %{{.*}}) : (f32, f32) -> f32
   %0 = spv.GLSL.FMin %arg0, %arg0 : f32
-  // CHECK: "llvm.intr.minnum"(%{{.*}}, %{{.*}}) : (!llvm.vec<3 x half>, !llvm.vec<3 x half>) -> !llvm.vec<3 x half>
+  // CHECK: "llvm.intr.minnum"(%{{.*}}, %{{.*}}) : (!llvm.vec<3 x f16>, !llvm.vec<3 x f16>) -> !llvm.vec<3 x f16>
   %1 = spv.GLSL.FMin %arg1, %arg1 : vector<3xf16>
   spv.Return
 }
@@ -97,9 +97,9 @@ spv.func @fmin(%arg0: f32, %arg1: vector<3xf16>) "None" {
 
 // CHECK-LABEL: @log
 spv.func @log(%arg0: f32, %arg1: vector<3xf16>) "None" {
-  // CHECK: "llvm.intr.log"(%{{.*}}) : (!llvm.float) -> !llvm.float
+  // CHECK: "llvm.intr.log"(%{{.*}}) : (f32) -> f32
   %0 = spv.GLSL.Log %arg0 : f32
-  // CHECK: "llvm.intr.log"(%{{.*}}) : (!llvm.vec<3 x half>) -> !llvm.vec<3 x half>
+  // CHECK: "llvm.intr.log"(%{{.*}}) : (!llvm.vec<3 x f16>) -> !llvm.vec<3 x f16>
   %1 = spv.GLSL.Log %arg1 : vector<3xf16>
   spv.Return
 }
@@ -110,9 +110,9 @@ spv.func @log(%arg0: f32, %arg1: vector<3xf16>) "None" {
 
 // CHECK-LABEL: @sin
 spv.func @sin(%arg0: f32, %arg1: vector<3xf16>) "None" {
-  // CHECK: "llvm.intr.sin"(%{{.*}}) : (!llvm.float) -> !llvm.float
+  // CHECK: "llvm.intr.sin"(%{{.*}}) : (f32) -> f32
   %0 = spv.GLSL.Sin %arg0 : f32
-  // CHECK: "llvm.intr.sin"(%{{.*}}) : (!llvm.vec<3 x half>) -> !llvm.vec<3 x half>
+  // CHECK: "llvm.intr.sin"(%{{.*}}) : (!llvm.vec<3 x f16>) -> !llvm.vec<3 x f16>
   %1 = spv.GLSL.Sin %arg1 : vector<3xf16>
   spv.Return
 }
@@ -149,9 +149,9 @@ spv.func @smin(%arg0: i16, %arg1: vector<3xi32>) "None" {
 
 // CHECK-LABEL: @sqrt
 spv.func @sqrt(%arg0: f32, %arg1: vector<3xf16>) "None" {
-  // CHECK: "llvm.intr.sqrt"(%{{.*}}) : (!llvm.float) -> !llvm.float
+  // CHECK: "llvm.intr.sqrt"(%{{.*}}) : (f32) -> f32
   %0 = spv.GLSL.Sqrt %arg0 : f32
-  // CHECK: "llvm.intr.sqrt"(%{{.*}}) : (!llvm.vec<3 x half>) -> !llvm.vec<3 x half>
+  // CHECK: "llvm.intr.sqrt"(%{{.*}}) : (!llvm.vec<3 x f16>) -> !llvm.vec<3 x f16>
   %1 = spv.GLSL.Sqrt %arg1 : vector<3xf16>
   spv.Return
 }
@@ -162,9 +162,9 @@ spv.func @sqrt(%arg0: f32, %arg1: vector<3xf16>) "None" {
 
 // CHECK-LABEL: @tan
 spv.func @tan(%arg0: f32) "None" {
-  // CHECK: %[[SIN:.*]] = "llvm.intr.sin"(%{{.*}}) : (!llvm.float) -> !llvm.float
-  // CHECK: %[[COS:.*]] = "llvm.intr.cos"(%{{.*}}) : (!llvm.float) -> !llvm.float
-  // CHECK: llvm.fdiv %[[SIN]], %[[COS]] : !llvm.float
+  // CHECK: %[[SIN:.*]] = "llvm.intr.sin"(%{{.*}}) : (f32) -> f32
+  // CHECK: %[[COS:.*]] = "llvm.intr.cos"(%{{.*}}) : (f32) -> f32
+  // CHECK: llvm.fdiv %[[SIN]], %[[COS]] : f32
   %0 = spv.GLSL.Tan %arg0 : f32
   spv.Return
 }
@@ -175,13 +175,13 @@ spv.func @tan(%arg0: f32) "None" {
 
 // CHECK-LABEL: @tanh
 spv.func @tanh(%arg0: f32) "None" {
-  // CHECK: %[[TWO:.*]] = llvm.mlir.constant(2.000000e+00 : f32) : !llvm.float
-  // CHECK: %[[X2:.*]] = llvm.fmul %[[TWO]], %{{.*}} : !llvm.float
-  // CHECK: %[[EXP:.*]] = "llvm.intr.exp"(%[[X2]]) : (!llvm.float) -> !llvm.float
-  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float
-  // CHECK: %[[T0:.*]] = llvm.fsub %[[EXP]], %[[ONE]] : !llvm.float
-  // CHECK: %[[T1:.*]] = llvm.fadd %[[EXP]], %[[ONE]] : !llvm.float
-  // CHECK: llvm.fdiv %[[T0]], %[[T1]] : !llvm.float
+  // CHECK: %[[TWO:.*]] = llvm.mlir.constant(2.000000e+00 : f32) : f32
+  // CHECK: %[[X2:.*]] = llvm.fmul %[[TWO]], %{{.*}} : f32
+  // CHECK: %[[EXP:.*]] = "llvm.intr.exp"(%[[X2]]) : (f32) -> f32
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : f32
+  // CHECK: %[[T0:.*]] = llvm.fsub %[[EXP]], %[[ONE]] : f32
+  // CHECK: %[[T1:.*]] = llvm.fadd %[[EXP]], %[[ONE]] : f32
+  // CHECK: llvm.fdiv %[[T0]], %[[T1]] : f32
   %0 = spv.GLSL.Tanh %arg0 : f32
   spv.Return
 }
@@ -192,9 +192,9 @@ spv.func @tanh(%arg0: f32) "None" {
 
 // CHECK-LABEL: @inverse_sqrt
 spv.func @inverse_sqrt(%arg0: f32) "None" {
-  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float
-  // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%{{.*}}) : (!llvm.float) -> !llvm.float
-  // CHECK: llvm.fdiv %[[ONE]], %[[SQRT]] : !llvm.float
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : f32
+  // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%{{.*}}) : (f32) -> f32
+  // CHECK: llvm.fdiv %[[ONE]], %[[SQRT]] : f32
   %0 = spv.GLSL.InverseSqrt %arg0 : f32
   spv.Return
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir
index 66c8d4ee28e2..ccf8068320ea 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir
@@ -10,7 +10,7 @@ spv.func @access_chain() "None" {
   %0 = spv.constant 1: i32
   %1 = spv.Variable : !spv.ptr<!spv.struct<(f32, !spv.array<4xf32>)>, Function>
   // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
-  // CHECK: llvm.getelementptr %{{.*}}[%[[ZERO]], %[[ONE]], %[[ONE]]] : (!llvm.ptr<struct<packed (float, array<4 x float>)>>, i32, i32, i32) -> !llvm.ptr<float>
+  // CHECK: llvm.getelementptr %{{.*}}[%[[ZERO]], %[[ONE]], %[[ONE]]] : (!llvm.ptr<struct<packed (f32, array<4 x f32>)>>, i32, i32, i32) -> !llvm.ptr<f32>
   %2 = spv.AccessChain %1[%0, %0] : !spv.ptr<!spv.struct<(f32, !spv.array<4xf32>)>, Function>, i32, i32
   spv.Return
 }
@@ -19,7 +19,7 @@ spv.func @access_chain() "None" {
 spv.func @access_chain_array(%arg0 : i32) "None" {
   %0 = spv.Variable : !spv.ptr<!spv.array<4x!spv.array<4xf32>>, Function>
   // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
-  // CHECK: llvm.getelementptr %{{.*}}[%[[ZERO]], %{{.*}}] : (!llvm.ptr<array<4 x array<4 x float>>>, i32, i32) -> !llvm.ptr<array<4 x float>>
+  // CHECK: llvm.getelementptr %{{.*}}[%[[ZERO]], %{{.*}}] : (!llvm.ptr<array<4 x array<4 x f32>>>, i32, i32) -> !llvm.ptr<array<4 x f32>>
   %1 = spv.AccessChain %0[%arg0] : !spv.ptr<!spv.array<4x!spv.array<4xf32>>, Function>, i32
   %2 = spv.Load "Function" %1 ["Volatile"] : !spv.array<4xf32>
   spv.Return
@@ -30,14 +30,14 @@ spv.func @access_chain_array(%arg0 : i32) "None" {
 //===----------------------------------------------------------------------===//
 
 spv.module Logical GLSL450 {
-  // CHECK: llvm.mlir.global external constant @var() : !llvm.float
+  // CHECK: llvm.mlir.global external constant @var() : f32
   spv.globalVariable @var : !spv.ptr<f32, Input>
 }
 
 spv.module Logical GLSL450 {
-  //       CHECK: llvm.mlir.global private @struct() : !llvm.struct<packed (float, array<10 x float>)>
+  //       CHECK: llvm.mlir.global private @struct() : !llvm.struct<packed (f32, array<10 x f32>)>
   // CHECK-LABEL: @func
-  //       CHECK:   llvm.mlir.addressof @struct : !llvm.ptr<struct<packed (float, array<10 x float>)>>
+  //       CHECK:   llvm.mlir.addressof @struct : !llvm.ptr<struct<packed (f32, array<10 x f32>)>>
   spv.globalVariable @struct : !spv.ptr<!spv.struct<(f32, !spv.array<10xf32>)>, Private>
   spv.func @func() "None" {
     %0 = spv.mlir.addressof @struct : !spv.ptr<!spv.struct<(f32, !spv.array<10xf32>)>, Private>
@@ -74,7 +74,7 @@ spv.module @name Logical GLSL450 {
 // CHECK-LABEL: @load
 spv.func @load() "None" {
   %0 = spv.Variable : !spv.ptr<f32, Function>
-  //  CHECK: llvm.load %{{.*}} : !llvm.ptr<float>
+  //  CHECK: llvm.load %{{.*}} : !llvm.ptr<f32>
   %1 = spv.Load "Function" %0 : f32
   spv.Return
 }
@@ -82,7 +82,7 @@ spv.func @load() "None" {
 // CHECK-LABEL: @load_none
 spv.func @load_none() "None" {
   %0 = spv.Variable : !spv.ptr<f32, Function>
-  //  CHECK: llvm.load %{{.*}} : !llvm.ptr<float>
+  //  CHECK: llvm.load %{{.*}} : !llvm.ptr<f32>
   %1 = spv.Load "Function" %0 ["None"] : f32
   spv.Return
 }
@@ -90,7 +90,7 @@ spv.func @load_none() "None" {
 // CHECK-LABEL: @load_with_alignment
 spv.func @load_with_alignment() "None" {
   %0 = spv.Variable : !spv.ptr<f32, Function>
-  // CHECK: llvm.load %{{.*}} {alignment = 4 : i64} : !llvm.ptr<float>
+  // CHECK: llvm.load %{{.*}} {alignment = 4 : i64} : !llvm.ptr<f32>
   %1 = spv.Load "Function" %0 ["Aligned", 4] : f32
   spv.Return
 }
@@ -98,7 +98,7 @@ spv.func @load_with_alignment() "None" {
 // CHECK-LABEL: @load_volatile
 spv.func @load_volatile() "None" {
   %0 = spv.Variable : !spv.ptr<f32, Function>
-  // CHECK: llvm.load volatile %{{.*}} : !llvm.ptr<float>
+  // CHECK: llvm.load volatile %{{.*}} : !llvm.ptr<f32>
   %1 = spv.Load "Function" %0 ["Volatile"] : f32
   spv.Return
 }
@@ -106,7 +106,7 @@ spv.func @load_volatile() "None" {
 // CHECK-LABEL: @load_nontemporal
 spv.func @load_nontemporal() "None" {
   %0 = spv.Variable : !spv.ptr<f32, Function>
-  // CHECK: llvm.load %{{.*}} {nontemporal} : !llvm.ptr<float>
+  // CHECK: llvm.load %{{.*}} {nontemporal} : !llvm.ptr<f32>
   %1 = spv.Load "Function" %0 ["Nontemporal"] : f32
   spv.Return
 }
@@ -118,7 +118,7 @@ spv.func @load_nontemporal() "None" {
 // CHECK-LABEL: @store
 spv.func @store(%arg0 : f32) "None" {
   %0 = spv.Variable : !spv.ptr<f32, Function>
-  // CHECK: llvm.store %{{.*}}, %{{.*}} : !llvm.ptr<float>
+  // CHECK: llvm.store %{{.*}}, %{{.*}} : !llvm.ptr<f32>
   spv.Store "Function" %0, %arg0 : f32
   spv.Return
 }
@@ -126,7 +126,7 @@ spv.func @store(%arg0 : f32) "None" {
 // CHECK-LABEL: @store_composite
 spv.func @store_composite(%arg0 : !spv.struct<(f64)>) "None" {
   %0 = spv.Variable : !spv.ptr<!spv.struct<(f64)>, Function>
-  // CHECK: llvm.store %{{.*}}, %{{.*}} : !llvm.ptr<struct<packed (double)>>
+  // CHECK: llvm.store %{{.*}}, %{{.*}} : !llvm.ptr<struct<packed (f64)>>
   spv.Store "Function" %0, %arg0 : !spv.struct<(f64)>
   spv.Return
 }
@@ -134,7 +134,7 @@ spv.func @store_composite(%arg0 : !spv.struct<(f64)>) "None" {
 // CHECK-LABEL: @store_with_alignment
 spv.func @store_with_alignment(%arg0 : f32) "None" {
   %0 = spv.Variable : !spv.ptr<f32, Function>
-  // CHECK: llvm.store %{{.*}}, %{{.*}} {alignment = 4 : i64} : !llvm.ptr<float>
+  // CHECK: llvm.store %{{.*}}, %{{.*}} {alignment = 4 : i64} : !llvm.ptr<f32>
   spv.Store "Function" %0, %arg0 ["Aligned", 4] : f32
   spv.Return
 }
@@ -142,7 +142,7 @@ spv.func @store_with_alignment(%arg0 : f32) "None" {
 // CHECK-LABEL: @store_volatile
 spv.func @store_volatile(%arg0 : f32) "None" {
   %0 = spv.Variable : !spv.ptr<f32, Function>
-  // CHECK: llvm.store volatile %{{.*}}, %{{.*}} : !llvm.ptr<float>
+  // CHECK: llvm.store volatile %{{.*}}, %{{.*}} : !llvm.ptr<f32>
   spv.Store "Function" %0, %arg0 ["Volatile"] : f32
   spv.Return
 }
@@ -150,7 +150,7 @@ spv.func @store_volatile(%arg0 : f32) "None" {
 // CHECK-LABEL: @store_nontemporal
 spv.func @store_nontemporal(%arg0 : f32) "None" {
   %0 = spv.Variable : !spv.ptr<f32, Function>
-  // CHECK: llvm.store %{{.*}}, %{{.*}} {nontemporal} : !llvm.ptr<float>
+  // CHECK: llvm.store %{{.*}}, %{{.*}} {nontemporal} : !llvm.ptr<f32>
   spv.Store "Function" %0, %arg0 ["Nontemporal"] : f32
   spv.Return
 }
@@ -162,7 +162,7 @@ spv.func @store_nontemporal(%arg0 : f32) "None" {
 // CHECK-LABEL: @variable_scalar
 spv.func @variable_scalar() "None" {
   // CHECK: %[[SIZE1:.*]] = llvm.mlir.constant(1 : i32) : i32
-  // CHECK: llvm.alloca %[[SIZE1]] x !llvm.float : (i32) -> !llvm.ptr<float>
+  // CHECK: llvm.alloca %[[SIZE1]] x f32 : (i32) -> !llvm.ptr<f32>
   %0 = spv.Variable : !spv.ptr<f32, Function>
   // CHECK: %[[SIZE2:.*]] = llvm.mlir.constant(1 : i32) : i32
   // CHECK: llvm.alloca %[[SIZE2]] x i8 : (i32) -> !llvm.ptr<i8>
@@ -184,7 +184,7 @@ spv.func @variable_scalar_with_initialization() "None" {
 // CHECK-LABEL: @variable_vector
 spv.func @variable_vector() "None" {
   // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : i32
-  // CHECK: llvm.alloca  %[[SIZE]] x !llvm.vec<3 x float> : (i32) -> !llvm.ptr<vec<3 x float>>
+  // CHECK: llvm.alloca  %[[SIZE]] x !llvm.vec<3 x f32> : (i32) -> !llvm.ptr<vec<3 x f32>>
   %0 = spv.Variable : !spv.ptr<vector<3xf32>, Function>
   spv.Return
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir
index 127a3b87d0a1..b95c3ff6b003 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir
@@ -6,7 +6,7 @@
 
 // CHECK-LABEL: @composite_extract_array
 spv.func @composite_extract_array(%arg: !spv.array<4x!spv.array<4xf32>>) "None" {
-  // CHECK: llvm.extractvalue %{{.*}}[1 : i32, 3 : i32] : !llvm.array<4 x array<4 x float>>
+  // CHECK: llvm.extractvalue %{{.*}}[1 : i32, 3 : i32] : !llvm.array<4 x array<4 x f32>>
   %0 = spv.CompositeExtract %arg[1 : i32, 3 : i32] : !spv.array<4x!spv.array<4xf32>>
   spv.Return
 }
@@ -14,7 +14,7 @@ spv.func @composite_extract_array(%arg: !spv.array<4x!spv.array<4xf32>>) "None"
 // CHECK-LABEL: @composite_extract_vector
 spv.func @composite_extract_vector(%arg: vector<3xf32>) "None" {
   // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
-  // CHECK: llvm.extractelement %{{.*}}[%[[ZERO]] : i32] : !llvm.vec<3 x float>
+  // CHECK: llvm.extractelement %{{.*}}[%[[ZERO]] : i32] : !llvm.vec<3 x f32>
   %0 = spv.CompositeExtract %arg[0 : i32] : vector<3xf32>
   spv.Return
 }
@@ -25,7 +25,7 @@ spv.func @composite_extract_vector(%arg: vector<3xf32>) "None" {
 
 // CHECK-LABEL: @composite_insert_struct
 spv.func @composite_insert_struct(%arg0: i32, %arg1: !spv.struct<(f32, !spv.array<4xi32>)>) "None" {
-  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[1 : i32, 3 : i32] : !llvm.struct<packed (float, array<4 x i32>)>
+  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[1 : i32, 3 : i32] : !llvm.struct<packed (f32, array<4 x i32>)>
   %0 = spv.CompositeInsert %arg0, %arg1[1 : i32, 3 : i32] : i32 into !spv.struct<(f32, !spv.array<4xi32>)>
   spv.Return
 }
@@ -33,7 +33,7 @@ spv.func @composite_insert_struct(%arg0: i32, %arg1: !spv.struct<(f32, !spv.arra
 // CHECK-LABEL: @composite_insert_vector
 spv.func @composite_insert_vector(%arg0: vector<3xf32>, %arg1: f32) "None" {
   // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32
-  // CHECK: llvm.insertelement %{{.*}}, %{{.*}}[%[[ONE]] : i32] : !llvm.vec<3 x float>
+  // CHECK: llvm.insertelement %{{.*}}, %{{.*}}[%[[ONE]] : i32] : !llvm.vec<3 x f32>
   %0 = spv.CompositeInsert %arg1, %arg0[1 : i32] : f32 into vector<3xf32>
   spv.Return
 }
@@ -46,7 +46,7 @@ spv.func @composite_insert_vector(%arg0: vector<3xf32>, %arg1: f32) "None" {
 spv.func @select_scalar(%arg0: i1, %arg1: vector<3xi32>, %arg2: f32) "None" {
   // CHECK: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : i1, !llvm.vec<3 x i32>
   %0 = spv.Select %arg0, %arg1, %arg1 : i1, vector<3xi32>
-  // CHECK: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : i1, !llvm.float
+  // CHECK: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : i1, f32
   %1 = spv.Select %arg0, %arg2, %arg2 : i1, f32
   spv.Return
 }
@@ -112,7 +112,7 @@ spv.module Logical OpenCL {
 
 // CHECK-LABEL: @undef_scalar
 spv.func @undef_scalar() "None" {
-  // CHECK: llvm.mlir.undef : !llvm.float
+  // CHECK: llvm.mlir.undef : f32
   %0 = spv.undef : f32
   spv.Return
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/spirv-types-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/spirv-types-to-llvm.mlir
index cade893381d6..f65ee5dc86f5 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/spirv-types-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/spirv-types-to-llvm.mlir
@@ -4,17 +4,17 @@
 // Array type
 //===----------------------------------------------------------------------===//
 
-// CHECK-LABEL: @array(!llvm.array<16 x float>, !llvm.array<32 x vec<4 x float>>)
+// CHECK-LABEL: @array(!llvm.array<16 x f32>, !llvm.array<32 x vec<4 x f32>>)
 spv.func @array(!spv.array<16 x f32>, !spv.array< 32 x vector<4xf32> >) "None"
 
-// CHECK-LABEL: @array_with_natural_stride(!llvm.array<16 x float>)
+// CHECK-LABEL: @array_with_natural_stride(!llvm.array<16 x f32>)
 spv.func @array_with_natural_stride(!spv.array<16 x f32, stride=4>) "None"
 
 //===----------------------------------------------------------------------===//
 // Pointer type
 //===----------------------------------------------------------------------===//
 
-// CHECK-LABEL: @pointer_scalar(!llvm.ptr<i1>, !llvm.ptr<float>)
+// CHECK-LABEL: @pointer_scalar(!llvm.ptr<i1>, !llvm.ptr<f32>)
 spv.func @pointer_scalar(!spv.ptr<i1, Uniform>, !spv.ptr<f32, Private>) "None"
 
 // CHECK-LABEL: @pointer_vector(!llvm.ptr<vec<4 x i32>>)
@@ -24,17 +24,17 @@ spv.func @pointer_vector(!spv.ptr<vector<4xi32>, Function>) "None"
 // Runtime array type
 //===----------------------------------------------------------------------===//
 
-// CHECK-LABEL: @runtime_array_vector(!llvm.array<0 x vec<4 x float>>)
+// CHECK-LABEL: @runtime_array_vector(!llvm.array<0 x vec<4 x f32>>)
 spv.func @runtime_array_vector(!spv.rtarray< vector<4xf32> >) "None"
 
-// CHECK-LABEL: @runtime_array_scalar(!llvm.array<0 x float>)
+// CHECK-LABEL: @runtime_array_scalar(!llvm.array<0 x f32>)
 spv.func @runtime_array_scalar(!spv.rtarray<f32>) "None"
 
 //===----------------------------------------------------------------------===//
 // Struct type
 //===----------------------------------------------------------------------===//
 
-// CHECK-LABEL: @struct(!llvm.struct<packed (double)>)
+// CHECK-LABEL: @struct(!llvm.struct<packed (f64)>)
 spv.func @struct(!spv.struct<(f64)>) "None"
 
 // CHECK-LABEL: @struct_nested(!llvm.struct<packed (i32, struct<packed (i64, i32)>)>)

diff  --git a/mlir/test/Conversion/StandardToLLVM/calling-convention.mlir b/mlir/test/Conversion/StandardToLLVM/calling-convention.mlir
index 6d37987bb218..ae2ead8d95f8 100644
--- a/mlir/test/Conversion/StandardToLLVM/calling-convention.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/calling-convention.mlir
@@ -8,11 +8,11 @@
 
 // An external function is transformed into the glue around calling an interface function.
 // CHECK-LABEL: @external
-// CHECK: %[[ALLOC0:.*]]: !llvm.ptr<float>, %[[ALIGN0:.*]]: !llvm.ptr<float>, %[[OFFSET0:.*]]: i64, %[[SIZE00:.*]]: i64, %[[SIZE01:.*]]: i64, %[[STRIDE00:.*]]: i64, %[[STRIDE01:.*]]: i64,
-// CHECK: %[[ALLOC1:.*]]: !llvm.ptr<float>, %[[ALIGN1:.*]]: !llvm.ptr<float>, %[[OFFSET1:.*]]: i64)
+// CHECK: %[[ALLOC0:.*]]: !llvm.ptr<f32>, %[[ALIGN0:.*]]: !llvm.ptr<f32>, %[[OFFSET0:.*]]: i64, %[[SIZE00:.*]]: i64, %[[SIZE01:.*]]: i64, %[[STRIDE00:.*]]: i64, %[[STRIDE01:.*]]: i64,
+// CHECK: %[[ALLOC1:.*]]: !llvm.ptr<f32>, %[[ALIGN1:.*]]: !llvm.ptr<f32>, %[[OFFSET1:.*]]: i64)
 func private @external(%arg0: memref<?x?xf32>, %arg1: memref<f32>)
   // Populate the descriptor for arg0.
-  // CHECK: %[[DESC00:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC00:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[DESC01:.*]] = llvm.insertvalue %arg0, %[[DESC00]][0]
   // CHECK: %[[DESC02:.*]] = llvm.insertvalue %arg1, %[[DESC01]][1]
   // CHECK: %[[DESC03:.*]] = llvm.insertvalue %arg2, %[[DESC02]][2]
@@ -23,18 +23,18 @@ func private @external(%arg0: memref<?x?xf32>, %arg1: memref<f32>)
 
   // Allocate on stack and store to comply with C calling convention.
   // CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : index)
-  // CHECK: %[[DESC0_ALLOCA:.*]] = llvm.alloca %[[C1]] x !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC0_ALLOCA:.*]] = llvm.alloca %[[C1]] x !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.store %[[DESC07]], %[[DESC0_ALLOCA]]
 
   // Populate the descriptor for arg1.
-  // CHECK: %[[DESC10:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-  // CHECK: %[[DESC11:.*]] = llvm.insertvalue %arg7, %[[DESC10]][0] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-  // CHECK: %[[DESC12:.*]] = llvm.insertvalue %arg8, %[[DESC11]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-  // CHECK: %[[DESC13:.*]] = llvm.insertvalue %arg9, %[[DESC12]][2] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
+  // CHECK: %[[DESC10:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+  // CHECK: %[[DESC11:.*]] = llvm.insertvalue %arg7, %[[DESC10]][0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+  // CHECK: %[[DESC12:.*]] = llvm.insertvalue %arg8, %[[DESC11]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+  // CHECK: %[[DESC13:.*]] = llvm.insertvalue %arg9, %[[DESC12]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
 
   // Allocate on stack and store to comply with C calling convention.
   // CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : index)
-  // CHECK: %[[DESC1_ALLOCA:.*]] = llvm.alloca %[[C1]] x !llvm.struct<(ptr<float>, ptr<float>, i64)>
+  // CHECK: %[[DESC1_ALLOCA:.*]] = llvm.alloca %[[C1]] x !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
   // CHECK: llvm.store %[[DESC13]], %[[DESC1_ALLOCA]]
 
   // Call the interface function.
@@ -42,18 +42,18 @@ func private @external(%arg0: memref<?x?xf32>, %arg1: memref<f32>)
 
 // Verify that an interface function is emitted.
 // CHECK-LABEL: llvm.func @_mlir_ciface_external
-// CHECK: (!llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>>, !llvm.ptr<struct<(ptr<float>, ptr<float>, i64)>>)
+// CHECK: (!llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>>, !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64)>>)
 
 // Verify that the return value is not affected.
 // CHECK-LABEL: @returner
-// CHECK: -> !llvm.struct<(struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>, struct<(ptr<float>, ptr<float>, i64)>)>
+// CHECK: -> !llvm.struct<(struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>, struct<(ptr<f32>, ptr<f32>, i64)>)>
 func private @returner() -> (memref<?x?xf32>, memref<f32>)
 
 // CHECK-LABEL: @caller
 func @caller() {
   %0:2 = call @returner() : () -> (memref<?x?xf32>, memref<f32>)
   // Extract individual values from the descriptor for the first memref.
-  // CHECK: %[[ALLOC0:.*]] = llvm.extractvalue %[[DESC0:.*]][0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[ALLOC0:.*]] = llvm.extractvalue %[[DESC0:.*]][0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[ALIGN0:.*]] = llvm.extractvalue %[[DESC0]][1]
   // CHECK: %[[OFFSET0:.*]] = llvm.extractvalue %[[DESC0]][2]
   // CHECK: %[[SIZE00:.*]] = llvm.extractvalue %[[DESC0]][3, 0]
@@ -62,12 +62,12 @@ func @caller() {
   // CHECK: %[[STRIDE01:.*]] = llvm.extractvalue %[[DESC0]][4, 1]
 
   // Extract individual values from the descriptor for the second memref.
-  // CHECK: %[[ALLOC1:.*]] = llvm.extractvalue %[[DESC1:.*]][0] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
+  // CHECK: %[[ALLOC1:.*]] = llvm.extractvalue %[[DESC1:.*]][0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
   // CHECK: %[[ALIGN1:.*]] = llvm.extractvalue %[[DESC1]][1]
   // CHECK: %[[OFFSET1:.*]] = llvm.extractvalue %[[DESC1]][2]
 
   // Forward the values to the call.
-  // CHECK: llvm.call @external(%[[ALLOC0]], %[[ALIGN0]], %[[OFFSET0]], %[[SIZE00]], %[[SIZE01]], %[[STRIDE00]], %[[STRIDE01]], %[[ALLOC1]], %[[ALIGN1]], %[[OFFSET1]]) : (!llvm.ptr<float>, !llvm.ptr<float>, i64, i64, i64, i64, i64, !llvm.ptr<float>, !llvm.ptr<float>, i64) -> ()
+  // CHECK: llvm.call @external(%[[ALLOC0]], %[[ALIGN0]], %[[OFFSET0]], %[[SIZE00]], %[[SIZE01]], %[[STRIDE00]], %[[STRIDE01]], %[[ALLOC1]], %[[ALIGN1]], %[[OFFSET1]]) : (!llvm.ptr<f32>, !llvm.ptr<f32>, i64, i64, i64, i64, i64, !llvm.ptr<f32>, !llvm.ptr<f32>, i64) -> ()
   call @external(%0#0, %0#1) : (memref<?x?xf32>, memref<f32>) -> ()
   return
 }
@@ -81,9 +81,9 @@ func @callee(%arg0: memref<?xf32>, %arg1: index) {
 
 // Verify that an interface function is emitted.
 // CHECK-LABEL: @_mlir_ciface_callee
-// CHECK: %[[ARG0:.*]]: !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>>
+// CHECK: %[[ARG0:.*]]: !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>>
   // Load the memref descriptor pointer.
-  // CHECK: %[[DESC:.*]] = llvm.load %[[ARG0]] : !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>>
+  // CHECK: %[[DESC:.*]] = llvm.load %[[ARG0]] : !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>>
 
   // Extract individual components of the descriptor.
   // CHECK: %[[ALLOC:.*]] = llvm.extractvalue %[[DESC]][0]
@@ -93,7 +93,7 @@ func @callee(%arg0: memref<?xf32>, %arg1: index) {
   // CHECK: %[[STRIDE:.*]] = llvm.extractvalue %[[DESC]][4, 0]
 
   // Forward the descriptor components to the call.
-  // CHECK: llvm.call @callee(%[[ALLOC]], %[[ALIGN]], %[[OFFSET]], %[[SIZE]], %[[STRIDE]], %{{.*}}) : (!llvm.ptr<float>, !llvm.ptr<float>, i64, i64, i64, i64) -> ()
+  // CHECK: llvm.call @callee(%[[ALLOC]], %[[ALIGN]], %[[OFFSET]], %[[SIZE]], %[[STRIDE]], %{{.*}}) : (!llvm.ptr<f32>, !llvm.ptr<f32>, i64, i64, i64, i64) -> ()
 
 //   EMIT_C_ATTRIBUTE-NOT: @mlir_ciface_callee
 

diff  --git a/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir b/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir
index 080ba531a14d..9ea40718e260 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-dynamic-memref-ops.mlir
@@ -2,11 +2,11 @@
 // RUN: mlir-opt -convert-std-to-llvm='use-aligned-alloc=1' %s | FileCheck %s --check-prefix=ALIGNED-ALLOC
 
 // CHECK-LABEL: func @check_strided_memref_arguments(
-// CHECK-COUNT-2: !llvm.ptr<float>
+// CHECK-COUNT-2: !llvm.ptr<f32>
 // CHECK-COUNT-5: i64
-// CHECK-COUNT-2: !llvm.ptr<float>
+// CHECK-COUNT-2: !llvm.ptr<f32>
 // CHECK-COUNT-5: i64
-// CHECK-COUNT-2: !llvm.ptr<float>
+// CHECK-COUNT-2: !llvm.ptr<f32>
 // CHECK-COUNT-5: i64
 func @check_strided_memref_arguments(%static: memref<10x20xf32, affine_map<(i,j)->(20 * i + j + 1)>>,
                                      %dynamic : memref<?x?xf32, affine_map<(i,j)[M]->(M * i + j + 1)>>,
@@ -15,48 +15,48 @@ func @check_strided_memref_arguments(%static: memref<10x20xf32, affine_map<(i,j)
 }
 
 // CHECK-LABEL: func @check_arguments
-// CHECK-COUNT-2: !llvm.ptr<float>
+// CHECK-COUNT-2: !llvm.ptr<f32>
 // CHECK-COUNT-5: i64
-// CHECK-COUNT-2: !llvm.ptr<float>
+// CHECK-COUNT-2: !llvm.ptr<f32>
 // CHECK-COUNT-5: i64
-// CHECK-COUNT-2: !llvm.ptr<float>
+// CHECK-COUNT-2: !llvm.ptr<f32>
 // CHECK-COUNT-5: i64
 func @check_arguments(%static: memref<10x20xf32>, %dynamic : memref<?x?xf32>, %mixed : memref<10x?xf32>) {
   return
 }
 
 // CHECK-LABEL: func @mixed_alloc(
-//       CHECK:   %[[M:.*]]: i64, %[[N:.*]]: i64) -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)> {
+//       CHECK:   %[[M:.*]]: i64, %[[N:.*]]: i64) -> !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)> {
 func @mixed_alloc(%arg0: index, %arg1: index) -> memref<?x42x?xf32> {
 //       CHECK:  %[[c42:.*]] = llvm.mlir.constant(42 : index) : i64
 //  CHECK-NEXT:  %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
 //  CHECK-NEXT:  %[[st0:.*]] = llvm.mul %[[N]], %[[c42]] : i64
 //  CHECK-NEXT:  %[[sz:.*]] = llvm.mul %[[st0]], %[[M]] : i64
-//  CHECK-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-//  CHECK-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-//  CHECK-NEXT:  %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+//  CHECK-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<f32>
+//  CHECK-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+//  CHECK-NEXT:  %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<f32> to i64
 //  CHECK-NEXT:  llvm.call @malloc(%[[sz_bytes]]) : (i64) -> !llvm.ptr<i8>
-//  CHECK-NEXT:  llvm.bitcast %{{.*}} : !llvm.ptr<i8> to !llvm.ptr<float>
-//  CHECK-NEXT:  llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
+//  CHECK-NEXT:  llvm.bitcast %{{.*}} : !llvm.ptr<i8> to !llvm.ptr<f32>
+//  CHECK-NEXT:  llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
 //  CHECK-NEXT:  %[[off:.*]] = llvm.mlir.constant(0 : index) : i64
-//  CHECK-NEXT:  llvm.insertvalue %[[off]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %[[M]], %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %[[c42]], %{{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %[[N]], %{{.*}}[3, 2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %[[st0]], %{{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %[[N]], %{{.*}}[4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %[[one]], %{{.*}}[4, 2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[off]], %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[M]], %{{.*}}[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[c42]], %{{.*}}[3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[N]], %{{.*}}[3, 2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[st0]], %{{.*}}[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[N]], %{{.*}}[4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[one]], %{{.*}}[4, 2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
   %0 = alloc(%arg0, %arg1) : memref<?x42x?xf32>
-//  CHECK-NEXT:  llvm.return %{{.*}} : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
+//  CHECK-NEXT:  llvm.return %{{.*}} : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
   return %0 : memref<?x42x?xf32>
 }
 
 // CHECK-LABEL: func @mixed_dealloc
 func @mixed_dealloc(%arg0: memref<?x42x?xf32>) {
-//      CHECK:  %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-// CHECK-NEXT:  %[[ptri8:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr<float> to !llvm.ptr<i8>
+//      CHECK:  %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+// CHECK-NEXT:  %[[ptri8:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr<f32> to !llvm.ptr<i8>
 // CHECK-NEXT:  llvm.call @free(%[[ptri8]]) : (!llvm.ptr<i8>) -> ()
   dealloc %arg0 : memref<?x42x?xf32>
 // CHECK-NEXT:  llvm.return
@@ -64,84 +64,84 @@ func @mixed_dealloc(%arg0: memref<?x42x?xf32>) {
 }
 
 // CHECK-LABEL: func @dynamic_alloc(
-//       CHECK:   %[[M:.*]]: i64, %[[N:.*]]: i64) -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)> {
+//       CHECK:   %[[M:.*]]: i64, %[[N:.*]]: i64) -> !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)> {
 func @dynamic_alloc(%arg0: index, %arg1: index) -> memref<?x?xf32> {
 //  CHECK-NEXT:  %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
 //  CHECK-NEXT:  %[[sz:.*]] = llvm.mul %[[N]], %[[M]] : i64
-//  CHECK-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-//  CHECK-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-//  CHECK-NEXT:  %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+//  CHECK-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<f32>
+//  CHECK-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+//  CHECK-NEXT:  %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<f32> to i64
 //  CHECK-NEXT:  llvm.call @malloc(%[[sz_bytes]]) : (i64) -> !llvm.ptr<i8>
-//  CHECK-NEXT:  llvm.bitcast %{{.*}} : !llvm.ptr<i8> to !llvm.ptr<float>
-//  CHECK-NEXT:  llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  llvm.bitcast %{{.*}} : !llvm.ptr<i8> to !llvm.ptr<f32>
+//  CHECK-NEXT:  llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 //  CHECK-NEXT:  %[[off:.*]] = llvm.mlir.constant(0 : index) : i64
-//  CHECK-NEXT:  llvm.insertvalue %[[off]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %[[M]], %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %[[N]], %{{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %[[N]], %{{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %[[one]], %{{.*}}[4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[off]], %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[M]], %{{.*}}[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[N]], %{{.*}}[3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[N]], %{{.*}}[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[one]], %{{.*}}[4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   %0 = alloc(%arg0, %arg1) : memref<?x?xf32>
-//  CHECK-NEXT:  llvm.return %{{.*}} : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  llvm.return %{{.*}} : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   return %0 : memref<?x?xf32>
 }
 
 // -----
 
 // CHECK-LABEL: func @dynamic_alloca
-// CHECK: %[[M:.*]]: i64, %[[N:.*]]: i64) -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)> {
+// CHECK: %[[M:.*]]: i64, %[[N:.*]]: i64) -> !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)> {
 func @dynamic_alloca(%arg0: index, %arg1: index) -> memref<?x?xf32> {
 //  CHECK-NEXT:  %[[st1:.*]] = llvm.mlir.constant(1 : index) : i64
 //  CHECK-NEXT:  %[[num_elems:.*]] = llvm.mul %[[N]], %[[M]] : i64
-//  CHECK-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-//  CHECK-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-//  CHECK-NEXT:  %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
-//  CHECK-NEXT:  %[[allocated:.*]] = llvm.alloca %[[sz_bytes]] x !llvm.float : (i64) -> !llvm.ptr<float>
-//  CHECK-NEXT:  llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %[[allocated]], %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %[[allocated]], %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<f32>
+//  CHECK-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+//  CHECK-NEXT:  %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<f32> to i64
+//  CHECK-NEXT:  %[[allocated:.*]] = llvm.alloca %[[sz_bytes]] x f32 : (i64) -> !llvm.ptr<f32>
+//  CHECK-NEXT:  llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[allocated]], %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[allocated]], %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 //  CHECK-NEXT:  %[[off:.*]] = llvm.mlir.constant(0 : index) : i64
-//  CHECK-NEXT:  llvm.insertvalue %[[off]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %[[M]], %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %[[N]], %{{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %[[N]], %{{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//  CHECK-NEXT:  llvm.insertvalue %[[st1]], %{{.*}}[4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[off]], %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[M]], %{{.*}}[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[N]], %{{.*}}[3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[N]], %{{.*}}[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  llvm.insertvalue %[[st1]], %{{.*}}[4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   %0 = alloca(%arg0, %arg1) : memref<?x?xf32>
 
 // Test with explicitly specified alignment. llvm.alloca takes care of the
 // alignment. The same pointer is thus used for allocation and aligned
 // accesses.
-// CHECK: %[[alloca_aligned:.*]] = llvm.alloca %{{.*}} x !llvm.float {alignment = 32 : i64} : (i64) -> !llvm.ptr<float>
-// CHECK: %[[desc:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK: %[[desc1:.*]] = llvm.insertvalue %[[alloca_aligned]], %[[desc]][0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK: llvm.insertvalue %[[alloca_aligned]], %[[desc1]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+// CHECK: %[[alloca_aligned:.*]] = llvm.alloca %{{.*}} x f32 {alignment = 32 : i64} : (i64) -> !llvm.ptr<f32>
+// CHECK: %[[desc:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+// CHECK: %[[desc1:.*]] = llvm.insertvalue %[[alloca_aligned]], %[[desc]][0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+// CHECK: llvm.insertvalue %[[alloca_aligned]], %[[desc1]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   alloca(%arg0, %arg1) {alignment = 32} : memref<?x?xf32>
   return %0 : memref<?x?xf32>
 }
 
 // CHECK-LABEL: func @dynamic_dealloc
 func @dynamic_dealloc(%arg0: memref<?x?xf32>) {
-//      CHECK:  %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK-NEXT:  %[[ptri8:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr<float> to !llvm.ptr<i8>
+//      CHECK:  %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+// CHECK-NEXT:  %[[ptri8:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr<f32> to !llvm.ptr<i8>
 // CHECK-NEXT:  llvm.call @free(%[[ptri8]]) : (!llvm.ptr<i8>) -> ()
   dealloc %arg0 : memref<?x?xf32>
   return
 }
 
-// CHECK-LABEL: func @stdlib_aligned_alloc({{.*}}) -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)> {
-// ALIGNED-ALLOC-LABEL: func @stdlib_aligned_alloc({{.*}}) -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)> {
+// CHECK-LABEL: func @stdlib_aligned_alloc({{.*}}) -> !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)> {
+// ALIGNED-ALLOC-LABEL: func @stdlib_aligned_alloc({{.*}}) -> !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)> {
 func @stdlib_aligned_alloc(%N : index) -> memref<32x18xf32> {
 // ALIGNED-ALLOC-NEXT:  %[[sz1:.*]] = llvm.mlir.constant(32 : index) : i64
 // ALIGNED-ALLOC-NEXT:  %[[sz2:.*]] = llvm.mlir.constant(18 : index) : i64
 // ALIGNED-ALLOC-NEXT:  %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
 // ALIGNED-ALLOC-NEXT:  %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64
-// ALIGNED-ALLOC-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// ALIGNED-ALLOC-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-// ALIGNED-ALLOC-NEXT:  %[[bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+// ALIGNED-ALLOC-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<f32>
+// ALIGNED-ALLOC-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+// ALIGNED-ALLOC-NEXT:  %[[bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<f32> to i64
 // ALIGNED-ALLOC-NEXT:  %[[alignment:.*]] = llvm.mlir.constant(32 : index) : i64
 // ALIGNED-ALLOC-NEXT:  %[[allocated:.*]] = llvm.call @aligned_alloc(%[[alignment]], %[[bytes]]) : (i64, i64) -> !llvm.ptr<i8>
-// ALIGNED-ALLOC-NEXT:  llvm.bitcast %[[allocated]] : !llvm.ptr<i8> to !llvm.ptr<float>
+// ALIGNED-ALLOC-NEXT:  llvm.bitcast %[[allocated]] : !llvm.ptr<i8> to !llvm.ptr<f32>
   %0 = alloc() {alignment = 32} : memref<32x18xf32>
   // Do another alloc just to test that we have a unique declaration for
   // aligned_alloc.
@@ -176,24 +176,24 @@ func @stdlib_aligned_alloc(%N : index) -> memref<32x18xf32> {
 }
 
 // CHECK-LABEL: func @mixed_load(
-// CHECK-COUNT-2: !llvm.ptr<float>,
+// CHECK-COUNT-2: !llvm.ptr<f32>,
 // CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: i64
 // CHECK:         %[[I:.*]]: i64,
 // CHECK:         %[[J:.*]]: i64)
 func @mixed_load(%mixed : memref<42x?xf32>, %i : index, %j : index) {
-//       CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//  CHECK-NEXT:  %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 //  CHECK-NEXT:  %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
 //  CHECK-NEXT:  %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
-//  CHECK-NEXT:  %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-//  CHECK-NEXT:  llvm.load %[[addr]] : !llvm.ptr<float>
+//  CHECK-NEXT:  %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+//  CHECK-NEXT:  llvm.load %[[addr]] : !llvm.ptr<f32>
   %0 = load %mixed[%i, %j] : memref<42x?xf32>
   return
 }
 
 // CHECK-LABEL: func @dynamic_load(
-// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<float>
-// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<float>
+// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<f32>
+// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<f32>
 // CHECK-SAME:         %[[ARG2:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[ARG3:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[ARG4:[a-zA-Z0-9]*]]: i64
@@ -202,19 +202,19 @@ func @mixed_load(%mixed : memref<42x?xf32>, %i : index, %j : index) {
 // CHECK-SAME:         %[[I:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[J:[a-zA-Z0-9]*]]: i64
 func @dynamic_load(%dynamic : memref<?x?xf32>, %i : index, %j : index) {
-//       CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//  CHECK-NEXT:  %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 //  CHECK-NEXT:  %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
 //  CHECK-NEXT:  %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
-//  CHECK-NEXT:  %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-//  CHECK-NEXT:  llvm.load %[[addr]] : !llvm.ptr<float>
+//  CHECK-NEXT:  %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+//  CHECK-NEXT:  llvm.load %[[addr]] : !llvm.ptr<f32>
   %0 = load %dynamic[%i, %j] : memref<?x?xf32>
   return
 }
 
 // CHECK-LABEL: func @prefetch
-// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<float>
-// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<float>
+// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<f32>
+// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<f32>
 // CHECK-SAME:         %[[ARG2:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[ARG3:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[ARG4:[a-zA-Z0-9]*]]: i64
@@ -223,32 +223,32 @@ func @dynamic_load(%dynamic : memref<?x?xf32>, %i : index, %j : index) {
 // CHECK-SAME:         %[[I:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[J:[a-zA-Z0-9]*]]: i64
 func @prefetch(%A : memref<?x?xf32>, %i : index, %j : index) {
-//      CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK-NEXT:  %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+//      CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+// CHECK-NEXT:  %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // CHECK-NEXT:  %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
 // CHECK-NEXT:  %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
-// CHECK-NEXT:  %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+// CHECK-NEXT:  %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
 // CHECK-NEXT:  [[C1:%.*]] = llvm.mlir.constant(1 : i32) : i32
 // CHECK-NEXT:  [[C3:%.*]] = llvm.mlir.constant(3 : i32) : i32
 // CHECK-NEXT:  [[C1_1:%.*]] = llvm.mlir.constant(1 : i32) : i32
-// CHECK-NEXT:  "llvm.intr.prefetch"(%[[addr]], [[C1]], [[C3]], [[C1_1]]) : (!llvm.ptr<float>, i32, i32, i32) -> ()
+// CHECK-NEXT:  "llvm.intr.prefetch"(%[[addr]], [[C1]], [[C3]], [[C1_1]]) : (!llvm.ptr<f32>, i32, i32, i32) -> ()
   prefetch %A[%i, %j], write, locality<3>, data : memref<?x?xf32>
 // CHECK:  [[C0:%.*]] = llvm.mlir.constant(0 : i32) : i32
 // CHECK:  [[C0_1:%.*]] = llvm.mlir.constant(0 : i32) : i32
 // CHECK:  [[C1_2:%.*]] = llvm.mlir.constant(1 : i32) : i32
-// CHECK:  "llvm.intr.prefetch"(%{{.*}}, [[C0]], [[C0_1]], [[C1_2]]) : (!llvm.ptr<float>, i32, i32, i32) -> ()
+// CHECK:  "llvm.intr.prefetch"(%{{.*}}, [[C0]], [[C0_1]], [[C1_2]]) : (!llvm.ptr<f32>, i32, i32, i32) -> ()
   prefetch %A[%i, %j], read, locality<0>, data : memref<?x?xf32>
 // CHECK:  [[C0_2:%.*]] = llvm.mlir.constant(0 : i32) : i32
 // CHECK:  [[C2:%.*]] = llvm.mlir.constant(2 : i32) : i32
 // CHECK:  [[C0_3:%.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK:  "llvm.intr.prefetch"(%{{.*}}, [[C0_2]], [[C2]], [[C0_3]]) : (!llvm.ptr<float>, i32, i32, i32) -> ()
+// CHECK:  "llvm.intr.prefetch"(%{{.*}}, [[C0_2]], [[C2]], [[C0_3]]) : (!llvm.ptr<f32>, i32, i32, i32) -> ()
   prefetch %A[%i, %j], read, locality<2>, instr : memref<?x?xf32>
   return
 }
 
 // CHECK-LABEL: func @dynamic_store
-// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<float>
-// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<float>
+// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<f32>
+// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<f32>
 // CHECK-SAME:         %[[ARG2:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[ARG3:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[ARG4:[a-zA-Z0-9]*]]: i64
@@ -257,19 +257,19 @@ func @prefetch(%A : memref<?x?xf32>, %i : index, %j : index) {
 // CHECK-SAME:         %[[I:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[J:[a-zA-Z0-9]*]]: i64
 func @dynamic_store(%dynamic : memref<?x?xf32>, %i : index, %j : index, %val : f32) {
-//       CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//  CHECK-NEXT:  %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 //  CHECK-NEXT:  %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
 //  CHECK-NEXT:  %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
-//  CHECK-NEXT:  %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-//  CHECK-NEXT:  llvm.store %{{.*}}, %[[addr]] : !llvm.ptr<float>
+//  CHECK-NEXT:  %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+//  CHECK-NEXT:  llvm.store %{{.*}}, %[[addr]] : !llvm.ptr<f32>
   store %val, %dynamic[%i, %j] : memref<?x?xf32>
   return
 }
 
 // CHECK-LABEL: func @mixed_store
-// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<float>
-// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<float>
+// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<f32>
+// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<f32>
 // CHECK-SAME:         %[[ARG2:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[ARG3:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[ARG4:[a-zA-Z0-9]*]]: i64
@@ -278,12 +278,12 @@ func @dynamic_store(%dynamic : memref<?x?xf32>, %i : index, %j : index, %val : f
 // CHECK-SAME:         %[[I:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[J:[a-zA-Z0-9]*]]: i64
 func @mixed_store(%mixed : memref<42x?xf32>, %i : index, %j : index, %val : f32) {
-//       CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//  CHECK-NEXT:  %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-NEXT:  %[[st0:.*]] = llvm.extractvalue %[[ld]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 //  CHECK-NEXT:  %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
 //  CHECK-NEXT:  %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
-//  CHECK-NEXT:  %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-//  CHECK-NEXT:  llvm.store %{{.*}}, %[[addr]] : !llvm.ptr<float>
+//  CHECK-NEXT:  %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+//  CHECK-NEXT:  llvm.store %{{.*}}, %[[addr]] : !llvm.ptr<f32>
   store %val, %mixed[%i, %j] : memref<42x?xf32>
   return
 }
@@ -340,9 +340,9 @@ func @memref_cast_mixed_to_mixed(%mixed : memref<42x?xf32>) {
 // CHECK-LABEL: func @memref_cast_ranked_to_unranked
 func @memref_cast_ranked_to_unranked(%arg : memref<42x2x?xf32>) {
 // CHECK-DAG:  %[[c:.*]] = llvm.mlir.constant(1 : index) : i64
-// CHECK-DAG:  %[[p:.*]] = llvm.alloca %[[c]] x !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)> : (i64) -> !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>>
-// CHECK-DAG:  llvm.store %{{.*}}, %[[p]] : !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>>
-// CHECK-DAG:  %[[p2:.*]] = llvm.bitcast %[[p]] : !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>> to !llvm.ptr<i8>
+// CHECK-DAG:  %[[p:.*]] = llvm.alloca %[[c]] x !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)> : (i64) -> !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>>
+// CHECK-DAG:  llvm.store %{{.*}}, %[[p]] : !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>>
+// CHECK-DAG:  %[[p2:.*]] = llvm.bitcast %[[p]] : !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>> to !llvm.ptr<i8>
 // CHECK-DAG:  %[[r:.*]] = llvm.mlir.constant(3 : i64) : i64
 // CHECK    :  llvm.mlir.undef : !llvm.struct<(i64, ptr<i8>)>
 // CHECK-DAG:  llvm.insertvalue %[[r]], %{{.*}}[0] : !llvm.struct<(i64, ptr<i8>)>
@@ -354,7 +354,7 @@ func @memref_cast_ranked_to_unranked(%arg : memref<42x2x?xf32>) {
 // CHECK-LABEL: func @memref_cast_unranked_to_ranked
 func @memref_cast_unranked_to_ranked(%arg : memref<*xf32>) {
 //      CHECK: %[[p:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(i64, ptr<i8>)>
-// CHECK-NEXT: llvm.bitcast %[[p]] : !llvm.ptr<i8> to !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, array<4 x i64>, array<4 x i64>)>>
+// CHECK-NEXT: llvm.bitcast %[[p]] : !llvm.ptr<i8> to !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>>
   %0 = memref_cast %arg : memref<*xf32> to memref<?x?x10x2xf32>
   return
 }
@@ -364,25 +364,25 @@ func @mixed_memref_dim(%mixed : memref<42x?x?x13x?xf32>) {
 // CHECK: llvm.mlir.constant(42 : index) : i64
   %c0 = constant 0 : index
   %0 = dim %mixed, %c0 : memref<42x?x?x13x?xf32>
-// CHECK: llvm.extractvalue %[[ld:.*]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
+// CHECK: llvm.extractvalue %[[ld:.*]][3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
   %c1 = constant 1 : index
   %1 = dim %mixed, %c1 : memref<42x?x?x13x?xf32>
-// CHECK: llvm.extractvalue %[[ld]][3, 2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
+// CHECK: llvm.extractvalue %[[ld]][3, 2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
   %c2 = constant 2 : index
   %2 = dim %mixed, %c2 : memref<42x?x?x13x?xf32>
 // CHECK: llvm.mlir.constant(13 : index) : i64
   %c3 = constant 3 : index
   %3 = dim %mixed, %c3 : memref<42x?x?x13x?xf32>
-// CHECK: llvm.extractvalue %[[ld]][3, 4] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
+// CHECK: llvm.extractvalue %[[ld]][3, 4] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
   %c4 = constant 4 : index
   %4 = dim %mixed, %c4 : memref<42x?x?x13x?xf32>
   return
 }
 
 // CHECK-LABEL: @memref_dim_with_dyn_index
-// CHECK-SAME: %[[ALLOC_PTR:.*]]: !llvm.ptr<float>, %[[ALIGN_PTR:.*]]: !llvm.ptr<float>, %[[OFFSET:.*]]: i64, %[[SIZE0:.*]]: i64, %[[SIZE1:.*]]: i64, %[[STRIDE0:.*]]: i64, %[[STRIDE1:.*]]: i64, %[[IDX:.*]]: i64) -> i64
+// CHECK-SAME: %[[ALLOC_PTR:.*]]: !llvm.ptr<f32>, %[[ALIGN_PTR:.*]]: !llvm.ptr<f32>, %[[OFFSET:.*]]: i64, %[[SIZE0:.*]]: i64, %[[SIZE1:.*]]: i64, %[[STRIDE0:.*]]: i64, %[[STRIDE1:.*]]: i64, %[[IDX:.*]]: i64) -> i64
 func @memref_dim_with_dyn_index(%arg : memref<3x?xf32>, %idx : index) -> index {
-  // CHECK-NEXT: %[[DESCR0:.*]] = llvm.mlir.undef : [[DESCR_TY:!llvm.struct<\(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>\)>]]
+  // CHECK-NEXT: %[[DESCR0:.*]] = llvm.mlir.undef : [[DESCR_TY:!llvm.struct<\(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>\)>]]
   // CHECK-NEXT: %[[DESCR1:.*]] = llvm.insertvalue %[[ALLOC_PTR]], %[[DESCR0]][0] : [[DESCR_TY]]
   // CHECK-NEXT: %[[DESCR2:.*]] = llvm.insertvalue %[[ALIGN_PTR]], %[[DESCR1]][1] : [[DESCR_TY]]
   // CHECK-NEXT: %[[DESCR3:.*]] = llvm.insertvalue %[[OFFSET]],    %[[DESCR2]][2] : [[DESCR_TY]]
@@ -445,13 +445,13 @@ func @memref_reinterpret_cast_unranked_to_dynamic_shape(%offset: index,
 // CHECK: [[INPUT:%.*]] = llvm.insertvalue {{.*}}[1] : !llvm.struct<(i64, ptr<i8>)>
 // CHECK: [[OUT_0:%.*]] = llvm.mlir.undef : [[TY:!.*]]
 // CHECK: [[DESCRIPTOR:%.*]] = llvm.extractvalue [[INPUT]][1] : !llvm.struct<(i64, ptr<i8>)>
-// CHECK: [[BASE_PTR_PTR:%.*]] = llvm.bitcast [[DESCRIPTOR]] : !llvm.ptr<i8> to !llvm.ptr<ptr<float>>
-// CHECK: [[BASE_PTR:%.*]] = llvm.load [[BASE_PTR_PTR]] : !llvm.ptr<ptr<float>>
-// CHECK: [[BASE_PTR_PTR_:%.*]] = llvm.bitcast [[DESCRIPTOR]] : !llvm.ptr<i8> to !llvm.ptr<ptr<float>>
+// CHECK: [[BASE_PTR_PTR:%.*]] = llvm.bitcast [[DESCRIPTOR]] : !llvm.ptr<i8> to !llvm.ptr<ptr<f32>>
+// CHECK: [[BASE_PTR:%.*]] = llvm.load [[BASE_PTR_PTR]] : !llvm.ptr<ptr<f32>>
+// CHECK: [[BASE_PTR_PTR_:%.*]] = llvm.bitcast [[DESCRIPTOR]] : !llvm.ptr<i8> to !llvm.ptr<ptr<f32>>
 // CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : index) : i64
 // CHECK: [[ALIGNED_PTR_PTR:%.*]] = llvm.getelementptr [[BASE_PTR_PTR_]]{{\[}}[[C1]]]
-// CHECK-SAME: : (!llvm.ptr<ptr<float>>, i64) -> !llvm.ptr<ptr<float>>
-// CHECK: [[ALIGNED_PTR:%.*]] = llvm.load [[ALIGNED_PTR_PTR]] : !llvm.ptr<ptr<float>>
+// CHECK-SAME: : (!llvm.ptr<ptr<f32>>, i64) -> !llvm.ptr<ptr<f32>>
+// CHECK: [[ALIGNED_PTR:%.*]] = llvm.load [[ALIGNED_PTR_PTR]] : !llvm.ptr<ptr<f32>>
 // CHECK: [[OUT_1:%.*]] = llvm.insertvalue [[BASE_PTR]], [[OUT_0]][0] : [[TY]]
 // CHECK: [[OUT_2:%.*]] = llvm.insertvalue [[ALIGNED_PTR]], [[OUT_1]][1] : [[TY]]
 // CHECK: [[OUT_3:%.*]] = llvm.insertvalue [[OFFSET]], [[OUT_2]][2] : [[TY]]
@@ -487,13 +487,13 @@ func @memref_reshape(%input : memref<2x3xf32>, %shape : memref<?xindex>) {
 // CHECK: [[ALIGN_PTR:%.*]] = llvm.extractvalue [[INPUT]][1] : [[INPUT_TY]]
 // CHECK: [[OFFSET:%.*]] = llvm.extractvalue [[INPUT]][2] : [[INPUT_TY]]
 // CHECK: [[BASE_PTR_PTR:%.*]] = llvm.bitcast [[UNDERLYING_DESC]]
-// CHECK-SAME:                     !llvm.ptr<i8> to !llvm.ptr<ptr<float>>
-// CHECK: llvm.store [[ALLOC_PTR]], [[BASE_PTR_PTR]] : !llvm.ptr<ptr<float>>
-// CHECK: [[BASE_PTR_PTR_:%.*]] = llvm.bitcast [[UNDERLYING_DESC]] : !llvm.ptr<i8> to !llvm.ptr<ptr<float>>
+// CHECK-SAME:                     !llvm.ptr<i8> to !llvm.ptr<ptr<f32>>
+// CHECK: llvm.store [[ALLOC_PTR]], [[BASE_PTR_PTR]] : !llvm.ptr<ptr<f32>>
+// CHECK: [[BASE_PTR_PTR_:%.*]] = llvm.bitcast [[UNDERLYING_DESC]] : !llvm.ptr<i8> to !llvm.ptr<ptr<f32>>
 // CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : index) : i64
 // CHECK: [[ALIGNED_PTR_PTR:%.*]] = llvm.getelementptr [[BASE_PTR_PTR_]]{{\[}}[[C1]]]
-// CHECK: llvm.store [[ALIGN_PTR]], [[ALIGNED_PTR_PTR]] : !llvm.ptr<ptr<float>>
-// CHECK: [[BASE_PTR_PTR__:%.*]] = llvm.bitcast [[UNDERLYING_DESC]] : !llvm.ptr<i8> to !llvm.ptr<ptr<float>>
+// CHECK: llvm.store [[ALIGN_PTR]], [[ALIGNED_PTR_PTR]] : !llvm.ptr<ptr<f32>>
+// CHECK: [[BASE_PTR_PTR__:%.*]] = llvm.bitcast [[UNDERLYING_DESC]] : !llvm.ptr<i8> to !llvm.ptr<ptr<f32>>
 // CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : index) : i64
 // CHECK: [[OFFSET_PTR_:%.*]] = llvm.getelementptr [[BASE_PTR_PTR__]]{{\[}}[[C2]]]
 // CHECK: [[OFFSET_PTR:%.*]] = llvm.bitcast [[OFFSET_PTR_]]
@@ -501,7 +501,7 @@ func @memref_reshape(%input : memref<2x3xf32>, %shape : memref<?xindex>) {
 
 // Iterate over shape operand in reverse order and set sizes and strides.
 // CHECK: [[STRUCT_PTR:%.*]] = llvm.bitcast [[UNDERLYING_DESC]]
-// CHECK-SAME: !llvm.ptr<i8> to !llvm.ptr<struct<(ptr<float>, ptr<float>, i64, i64)>>
+// CHECK-SAME: !llvm.ptr<i8> to !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, i64)>>
 // CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : index) : i64
 // CHECK: [[C3_I32:%.*]] = llvm.mlir.constant(3 : i32) : i32
 // CHECK: [[SIZES_PTR:%.*]] = llvm.getelementptr [[STRUCT_PTR]]{{\[}}[[C0]], [[C3_I32]]]

diff  --git a/mlir/test/Conversion/StandardToLLVM/convert-funcs.mlir b/mlir/test/Conversion/StandardToLLVM/convert-funcs.mlir
index d5f9aff52bff..334b21c3ef5b 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-funcs.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-funcs.mlir
@@ -6,7 +6,7 @@ func private @second_order_arg(%arg0 : () -> ())
 //CHECK: llvm.func @second_order_result() -> !llvm.ptr<func<void ()>>
 func private @second_order_result() -> (() -> ())
 
-//CHECK: llvm.func @second_order_multi_result() -> !llvm.struct<(ptr<func<i32 ()>>, ptr<func<i64 ()>>, ptr<func<float ()>>)>
+//CHECK: llvm.func @second_order_multi_result() -> !llvm.struct<(ptr<func<i32 ()>>, ptr<func<i64 ()>>, ptr<func<f32 ()>>)>
 func private @second_order_multi_result() -> (() -> (i32), () -> (i64), () -> (f32))
 
 //CHECK: llvm.func @third_order(!llvm.ptr<func<ptr<func<void ()>> (ptr<func<void ()>>)>>) -> !llvm.ptr<func<ptr<func<void ()>> (ptr<func<void ()>>)>>
@@ -19,11 +19,11 @@ func private @fifth_order_left(%arg0: (((() -> ()) -> ()) -> ()) -> ())
 func private @fifth_order_right(%arg0: () -> (() -> (() -> (() -> ()))))
 
 // Check that memrefs are converted to argument packs if appear as function arguments.
-// CHECK: llvm.func @memref_call_conv(!llvm.ptr<float>, !llvm.ptr<float>, i64, i64, i64)
+// CHECK: llvm.func @memref_call_conv(!llvm.ptr<f32>, !llvm.ptr<f32>, i64, i64, i64)
 func private @memref_call_conv(%arg0: memref<?xf32>)
 
 // Same in nested functions.
-// CHECK: llvm.func @memref_call_conv_nested(!llvm.ptr<func<void (ptr<float>, ptr<float>, i64, i64, i64)>>)
+// CHECK: llvm.func @memref_call_conv_nested(!llvm.ptr<func<void (ptr<f32>, ptr<f32>, i64, i64, i64)>>)
 func private @memref_call_conv_nested(%arg0: (memref<?xf32>) -> ())
 
 //CHECK-LABEL: llvm.func @pass_through(%arg0: !llvm.ptr<func<void ()>>) -> !llvm.ptr<func<void ()>> {
@@ -51,9 +51,9 @@ func @indirect_const_call(%arg0: i32) {
   return
 }
 
-// CHECK-LABEL: llvm.func @indirect_call(%arg0: !llvm.ptr<func<i32 (float)>>, %arg1: !llvm.float) -> i32 {
+// CHECK-LABEL: llvm.func @indirect_call(%arg0: !llvm.ptr<func<i32 (f32)>>, %arg1: f32) -> i32 {
 func @indirect_call(%arg0: (f32) -> i32, %arg1: f32) -> i32 {
-// CHECK-NEXT:  %0 = llvm.call %arg0(%arg1) : (!llvm.float) -> i32
+// CHECK-NEXT:  %0 = llvm.call %arg0(%arg1) : (f32) -> i32
   %0 = call_indirect %arg0(%arg1) : (f32) -> i32
 // CHECK-NEXT:  llvm.return %0 : i32
   return %0 : i32

diff  --git a/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir b/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir
index b6133e840251..f13da7ae48c5 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-static-memref-ops.mlir
@@ -2,7 +2,7 @@
 // RUN: mlir-opt -convert-std-to-llvm='use-bare-ptr-memref-call-conv=1' -split-input-file %s | FileCheck %s --check-prefix=BAREPTR
 
 // BAREPTR-LABEL: func @check_noalias
-// BAREPTR-SAME: %{{.*}}: !llvm.ptr<float> {llvm.noalias = true}, %{{.*}}: !llvm.ptr<float> {llvm.noalias = true}
+// BAREPTR-SAME: %{{.*}}: !llvm.ptr<f32> {llvm.noalias = true}, %{{.*}}: !llvm.ptr<f32> {llvm.noalias = true}
 func @check_noalias(%static : memref<2xf32> {llvm.noalias = true}, %other : memref<2xf32> {llvm.noalias = true}) {
     return
 }
@@ -10,89 +10,89 @@ func @check_noalias(%static : memref<2xf32> {llvm.noalias = true}, %other : memr
 // -----
 
 // CHECK-LABEL: func @check_static_return
-// CHECK-COUNT-2: !llvm.ptr<float>
+// CHECK-COUNT-2: !llvm.ptr<f32>
 // CHECK-COUNT-5: i64
-// CHECK-SAME: -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+// CHECK-SAME: -> !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // BAREPTR-LABEL: func @check_static_return
-// BAREPTR-SAME: (%[[arg:.*]]: !llvm.ptr<float>) -> !llvm.ptr<float> {
+// BAREPTR-SAME: (%[[arg:.*]]: !llvm.ptr<f32>) -> !llvm.ptr<f32> {
 func @check_static_return(%static : memref<32x18xf32>) -> memref<32x18xf32> {
-// CHECK:  llvm.return %{{.*}} : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+// CHECK:  llvm.return %{{.*}} : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 
-// BAREPTR: %[[udf:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[base0:.*]] = llvm.insertvalue %[[arg]], %[[udf]][0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[aligned:.*]] = llvm.insertvalue %[[arg]], %[[base0]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR: %[[udf:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR-NEXT: %[[base0:.*]] = llvm.insertvalue %[[arg]], %[[udf]][0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR-NEXT: %[[aligned:.*]] = llvm.insertvalue %[[arg]], %[[base0]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // BAREPTR-NEXT: %[[val0:.*]] = llvm.mlir.constant(0 : index) : i64
-// BAREPTR-NEXT: %[[ins0:.*]] = llvm.insertvalue %[[val0]], %[[aligned]][2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR-NEXT: %[[ins0:.*]] = llvm.insertvalue %[[val0]], %[[aligned]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // BAREPTR-NEXT: %[[val1:.*]] = llvm.mlir.constant(32 : index) : i64
-// BAREPTR-NEXT: %[[ins1:.*]] = llvm.insertvalue %[[val1]], %[[ins0]][3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR-NEXT: %[[ins1:.*]] = llvm.insertvalue %[[val1]], %[[ins0]][3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // BAREPTR-NEXT: %[[val2:.*]] = llvm.mlir.constant(18 : index) : i64
-// BAREPTR-NEXT: %[[ins2:.*]] = llvm.insertvalue %[[val2]], %[[ins1]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR-NEXT: %[[ins2:.*]] = llvm.insertvalue %[[val2]], %[[ins1]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // BAREPTR-NEXT: %[[val3:.*]] = llvm.mlir.constant(18 : index) : i64
-// BAREPTR-NEXT: %[[ins3:.*]] = llvm.insertvalue %[[val3]], %[[ins2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR-NEXT: %[[ins3:.*]] = llvm.insertvalue %[[val3]], %[[ins2]][3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // BAREPTR-NEXT: %[[val4:.*]] = llvm.mlir.constant(1 : index) : i64
-// BAREPTR-NEXT: %[[ins4:.*]] = llvm.insertvalue %[[val4]], %[[ins3]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[base1:.*]] = llvm.extractvalue %[[ins4]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: llvm.return %[[base1]] : !llvm.ptr<float>
+// BAREPTR-NEXT: %[[ins4:.*]] = llvm.insertvalue %[[val4]], %[[ins3]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR-NEXT: %[[base1:.*]] = llvm.extractvalue %[[ins4]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR-NEXT: llvm.return %[[base1]] : !llvm.ptr<f32>
   return %static : memref<32x18xf32>
 }
 
 // -----
 
 // CHECK-LABEL: func @check_static_return_with_offset
-// CHECK-COUNT-2: !llvm.ptr<float>
+// CHECK-COUNT-2: !llvm.ptr<f32>
 // CHECK-COUNT-5: i64
-// CHECK-SAME: -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+// CHECK-SAME: -> !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // BAREPTR-LABEL: func @check_static_return_with_offset
-// BAREPTR-SAME: (%[[arg:.*]]: !llvm.ptr<float>) -> !llvm.ptr<float> {
+// BAREPTR-SAME: (%[[arg:.*]]: !llvm.ptr<f32>) -> !llvm.ptr<f32> {
 func @check_static_return_with_offset(%static : memref<32x18xf32, offset:7, strides:[22,1]>) -> memref<32x18xf32, offset:7, strides:[22,1]> {
-// CHECK:  llvm.return %{{.*}} : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+// CHECK:  llvm.return %{{.*}} : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 
-// BAREPTR: %[[udf:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[base0:.*]] = llvm.insertvalue %[[arg]], %[[udf]][0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[aligned:.*]] = llvm.insertvalue %[[arg]], %[[base0]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR: %[[udf:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR-NEXT: %[[base0:.*]] = llvm.insertvalue %[[arg]], %[[udf]][0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR-NEXT: %[[aligned:.*]] = llvm.insertvalue %[[arg]], %[[base0]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // BAREPTR-NEXT: %[[val0:.*]] = llvm.mlir.constant(7 : index) : i64
-// BAREPTR-NEXT: %[[ins0:.*]] = llvm.insertvalue %[[val0]], %[[aligned]][2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR-NEXT: %[[ins0:.*]] = llvm.insertvalue %[[val0]], %[[aligned]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // BAREPTR-NEXT: %[[val1:.*]] = llvm.mlir.constant(32 : index) : i64
-// BAREPTR-NEXT: %[[ins1:.*]] = llvm.insertvalue %[[val1]], %[[ins0]][3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR-NEXT: %[[ins1:.*]] = llvm.insertvalue %[[val1]], %[[ins0]][3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // BAREPTR-NEXT: %[[val2:.*]] = llvm.mlir.constant(22 : index) : i64
-// BAREPTR-NEXT: %[[ins2:.*]] = llvm.insertvalue %[[val2]], %[[ins1]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR-NEXT: %[[ins2:.*]] = llvm.insertvalue %[[val2]], %[[ins1]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // BAREPTR-NEXT: %[[val3:.*]] = llvm.mlir.constant(18 : index) : i64
-// BAREPTR-NEXT: %[[ins3:.*]] = llvm.insertvalue %[[val3]], %[[ins2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR-NEXT: %[[ins3:.*]] = llvm.insertvalue %[[val3]], %[[ins2]][3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // BAREPTR-NEXT: %[[val4:.*]] = llvm.mlir.constant(1 : index) : i64
-// BAREPTR-NEXT: %[[ins4:.*]] = llvm.insertvalue %[[val4]], %[[ins3]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[base1:.*]] = llvm.extractvalue %[[ins4]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: llvm.return %[[base1]] : !llvm.ptr<float>
+// BAREPTR-NEXT: %[[ins4:.*]] = llvm.insertvalue %[[val4]], %[[ins3]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR-NEXT: %[[base1:.*]] = llvm.extractvalue %[[ins4]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR-NEXT: llvm.return %[[base1]] : !llvm.ptr<f32>
   return %static : memref<32x18xf32, offset:7, strides:[22,1]>
 }
 
 // -----
 
-// CHECK-LABEL: func @zero_d_alloc() -> !llvm.struct<(ptr<float>, ptr<float>, i64)> {
-// BAREPTR-LABEL: func @zero_d_alloc() -> !llvm.ptr<float> {
+// CHECK-LABEL: func @zero_d_alloc() -> !llvm.struct<(ptr<f32>, ptr<f32>, i64)> {
+// BAREPTR-LABEL: func @zero_d_alloc() -> !llvm.ptr<f32> {
 func @zero_d_alloc() -> memref<f32> {
 // CHECK-NEXT:  %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
-// CHECK-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// CHECK-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[one]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-// CHECK-NEXT:  %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+// CHECK-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<f32>
+// CHECK-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[one]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+// CHECK-NEXT:  %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<f32> to i64
 // CHECK-NEXT:  llvm.call @malloc(%[[size_bytes]]) : (i64) -> !llvm.ptr<i8>
-// CHECK-NEXT:  %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<i8> to !llvm.ptr<float>
-// CHECK-NEXT:  llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-// CHECK-NEXT:  llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-// CHECK-NEXT:  llvm.insertvalue %[[ptr]], %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
+// CHECK-NEXT:  %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<i8> to !llvm.ptr<f32>
+// CHECK-NEXT:  llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+// CHECK-NEXT:  llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+// CHECK-NEXT:  llvm.insertvalue %[[ptr]], %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
 // CHECK-NEXT:  %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
-// CHECK-NEXT:  llvm.insertvalue %[[c0]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
+// CHECK-NEXT:  llvm.insertvalue %[[c0]], %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
 
 // BAREPTR-NEXT:  %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
-// BAREPTR-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// BAREPTR-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[one]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-// BAREPTR-NEXT:  %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+// BAREPTR-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<f32>
+// BAREPTR-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[one]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+// BAREPTR-NEXT:  %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<f32> to i64
 // BAREPTR-NEXT:  llvm.call @malloc(%[[size_bytes]]) : (i64) -> !llvm.ptr<i8>
-// BAREPTR-NEXT:  %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<i8> to !llvm.ptr<float>
-// BAREPTR-NEXT:  llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-// BAREPTR-NEXT:  llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-// BAREPTR-NEXT:  llvm.insertvalue %[[ptr]], %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
+// BAREPTR-NEXT:  %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<i8> to !llvm.ptr<f32>
+// BAREPTR-NEXT:  llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+// BAREPTR-NEXT:  llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+// BAREPTR-NEXT:  llvm.insertvalue %[[ptr]], %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
 // BAREPTR-NEXT:  %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
-// BAREPTR-NEXT:  llvm.insertvalue %[[c0]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
+// BAREPTR-NEXT:  llvm.insertvalue %[[c0]], %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
   %0 = alloc() : memref<f32>
   return %0 : memref<f32>
 }
@@ -100,14 +100,14 @@ func @zero_d_alloc() -> memref<f32> {
 // -----
 
 // CHECK-LABEL: func @zero_d_dealloc
-// BAREPTR-LABEL: func @zero_d_dealloc(%{{.*}}: !llvm.ptr<float>) {
+// BAREPTR-LABEL: func @zero_d_dealloc(%{{.*}}: !llvm.ptr<f32>) {
 func @zero_d_dealloc(%arg0: memref<f32>) {
-//      CHECK:  %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-// CHECK-NEXT:  %[[bc:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr<float> to !llvm.ptr<i8>
+//      CHECK:  %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+// CHECK-NEXT:  %[[bc:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr<f32> to !llvm.ptr<i8>
 // CHECK-NEXT:  llvm.call @free(%[[bc]]) : (!llvm.ptr<i8>) -> ()
 
-// BAREPTR: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-// BAREPTR-NEXT: %[[bc:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr<float> to !llvm.ptr<i8>
+// BAREPTR: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+// BAREPTR-NEXT: %[[bc:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr<f32> to !llvm.ptr<i8>
 // BAREPTR-NEXT: llvm.call @free(%[[bc]]) : (!llvm.ptr<i8>) -> ()
   dealloc %arg0 : memref<f32>
   return
@@ -120,94 +120,94 @@ func @zero_d_dealloc(%arg0: memref<f32>) {
 func @aligned_1d_alloc() -> memref<42xf32> {
 // CHECK-NEXT:  %[[sz1:.*]] = llvm.mlir.constant(42 : index) : i64
 // CHECK-NEXT:  %[[st1:.*]] = llvm.mlir.constant(1 : index) : i64
-// CHECK-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// CHECK-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-// CHECK-NEXT:  %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+// CHECK-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<f32>
+// CHECK-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz1]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+// CHECK-NEXT:  %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<f32> to i64
 // CHECK-NEXT:  %[[alignment:.*]] = llvm.mlir.constant(8 : index) : i64
 // CHECK-NEXT:  %[[allocsize:.*]] = llvm.add %[[size_bytes]], %[[alignment]] : i64
 // CHECK-NEXT:  %[[allocated:.*]] = llvm.call @malloc(%[[allocsize]]) : (i64) -> !llvm.ptr<i8>
-// CHECK-NEXT:  %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<i8> to !llvm.ptr<float>
-// CHECK-NEXT:  %[[allocatedAsInt:.*]] = llvm.ptrtoint %[[ptr]] : !llvm.ptr<float> to i64
+// CHECK-NEXT:  %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<i8> to !llvm.ptr<f32>
+// CHECK-NEXT:  %[[allocatedAsInt:.*]] = llvm.ptrtoint %[[ptr]] : !llvm.ptr<f32> to i64
 // CHECK-NEXT:  %[[one_1:.*]] = llvm.mlir.constant(1 : index) : i64
 // CHECK-NEXT:  %[[bump:.*]] = llvm.sub %[[alignment]], %[[one_1]] : i64
 // CHECK-NEXT:  %[[bumped:.*]] = llvm.add %[[allocatedAsInt]], %[[bump]] : i64
 // CHECK-NEXT:  %[[mod:.*]] = llvm.urem %[[bumped]], %[[alignment]] : i64
 // CHECK-NEXT:  %[[aligned:.*]] = llvm.sub %[[bumped]], %[[mod]] : i64
-// CHECK-NEXT:  %[[alignedBitCast:.*]] = llvm.inttoptr %[[aligned]] : i64 to !llvm.ptr<float>
-// CHECK-NEXT:  llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-// CHECK-NEXT:  llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-// CHECK-NEXT:  llvm.insertvalue %[[alignedBitCast]], %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
+// CHECK-NEXT:  %[[alignedBitCast:.*]] = llvm.inttoptr %[[aligned]] : i64 to !llvm.ptr<f32>
+// CHECK-NEXT:  llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+// CHECK-NEXT:  llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+// CHECK-NEXT:  llvm.insertvalue %[[alignedBitCast]], %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
 // CHECK-NEXT:  %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
-// CHECK-NEXT:  llvm.insertvalue %[[c0]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
+// CHECK-NEXT:  llvm.insertvalue %[[c0]], %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
 
 // BAREPTR-NEXT:  %[[sz1:.*]] = llvm.mlir.constant(42 : index) : i64
 // BAREPTR-NEXT:  %[[st1:.*]] = llvm.mlir.constant(1 : index) : i64
-// BAREPTR-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// BAREPTR-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-// BAREPTR-NEXT:  %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+// BAREPTR-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<f32>
+// BAREPTR-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz1]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+// BAREPTR-NEXT:  %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<f32> to i64
 // BAREPTR-NEXT:  %[[alignment:.*]] = llvm.mlir.constant(8 : index) : i64
 // BAREPTR-NEXT:  %[[allocsize:.*]] = llvm.add %[[size_bytes]], %[[alignment]] : i64
 // BAREPTR-NEXT:  %[[allocated:.*]] = llvm.call @malloc(%[[allocsize]]) : (i64) -> !llvm.ptr<i8>
-// BAREPTR-NEXT:  %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<i8> to !llvm.ptr<float>
-// BAREPTR-NEXT:  %[[allocatedAsInt:.*]] = llvm.ptrtoint %[[ptr]] : !llvm.ptr<float> to i64
+// BAREPTR-NEXT:  %[[ptr:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<i8> to !llvm.ptr<f32>
+// BAREPTR-NEXT:  %[[allocatedAsInt:.*]] = llvm.ptrtoint %[[ptr]] : !llvm.ptr<f32> to i64
 // BAREPTR-NEXT:  %[[one_2:.*]] = llvm.mlir.constant(1 : index) : i64
 // BAREPTR-NEXT:  %[[bump:.*]] = llvm.sub %[[alignment]], %[[one_2]] : i64
 // BAREPTR-NEXT:  %[[bumped:.*]] = llvm.add %[[allocatedAsInt]], %[[bump]] : i64
 // BAREPTR-NEXT:  %[[mod:.*]] = llvm.urem %[[bumped]], %[[alignment]] : i64
 // BAREPTR-NEXT:  %[[aligned:.*]] = llvm.sub %[[bumped]], %[[mod]] : i64
-// BAREPTR-NEXT:  %[[alignedBitCast:.*]] = llvm.inttoptr %[[aligned]] : i64 to !llvm.ptr<float>
-// BAREPTR-NEXT:  llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-// BAREPTR-NEXT:  llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-// BAREPTR-NEXT:  llvm.insertvalue %[[alignedBitCast]], %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
+// BAREPTR-NEXT:  %[[alignedBitCast:.*]] = llvm.inttoptr %[[aligned]] : i64 to !llvm.ptr<f32>
+// BAREPTR-NEXT:  llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+// BAREPTR-NEXT:  llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+// BAREPTR-NEXT:  llvm.insertvalue %[[alignedBitCast]], %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
 // BAREPTR-NEXT:  %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
-// BAREPTR-NEXT:  llvm.insertvalue %[[c0]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
+// BAREPTR-NEXT:  llvm.insertvalue %[[c0]], %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
   %0 = alloc() {alignment = 8} : memref<42xf32>
   return %0 : memref<42xf32>
 }
 
 // -----
 
-// CHECK-LABEL: func @static_alloc() -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)> {
-// BAREPTR-LABEL: func @static_alloc() -> !llvm.ptr<float> {
+// CHECK-LABEL: func @static_alloc() -> !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)> {
+// BAREPTR-LABEL: func @static_alloc() -> !llvm.ptr<f32> {
 func @static_alloc() -> memref<32x18xf32> {
 //      CHECK:  %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64
-// CHECK-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// CHECK-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-// CHECK-NEXT:  %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+// CHECK-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<f32>
+// CHECK-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+// CHECK-NEXT:  %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<f32> to i64
 // CHECK-NEXT:  %[[allocated:.*]] = llvm.call @malloc(%[[size_bytes]]) : (i64) -> !llvm.ptr<i8>
-// CHECK-NEXT:  llvm.bitcast %[[allocated]] : !llvm.ptr<i8> to !llvm.ptr<float>
+// CHECK-NEXT:  llvm.bitcast %[[allocated]] : !llvm.ptr<i8> to !llvm.ptr<f32>
 
 // BAREPTR:      %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64
-// BAREPTR-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// BAREPTR-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-// BAREPTR-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
+// BAREPTR-NEXT: %[[null:.*]] = llvm.mlir.null : !llvm.ptr<f32>
+// BAREPTR-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+// BAREPTR-NEXT: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<f32> to i64
 // BAREPTR-NEXT: %[[allocated:.*]] = llvm.call @malloc(%[[size_bytes]]) : (i64) -> !llvm.ptr<i8>
-// BAREPTR-NEXT: llvm.bitcast %[[allocated]] : !llvm.ptr<i8> to !llvm.ptr<float>
+// BAREPTR-NEXT: llvm.bitcast %[[allocated]] : !llvm.ptr<i8> to !llvm.ptr<f32>
  %0 = alloc() : memref<32x18xf32>
  return %0 : memref<32x18xf32>
 }
 
 // -----
 
-// CHECK-LABEL: func @static_alloca() -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)> {
+// CHECK-LABEL: func @static_alloca() -> !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)> {
 func @static_alloca() -> memref<32x18xf32> {
 // CHECK-NEXT:  %[[sz1:.*]] = llvm.mlir.constant(32 : index) : i64
 // CHECK-NEXT:  %[[sz2:.*]] = llvm.mlir.constant(18 : index) : i64
 // CHECK-NEXT:  %[[st2:.*]] = llvm.mlir.constant(1 : index) : i64
 // CHECK-NEXT:  %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64
-// CHECK-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<float>
-// CHECK-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-// CHECK-NEXT:  %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<float> to i64
-// CHECK-NEXT:  %[[allocated:.*]] = llvm.alloca %[[size_bytes]] x !llvm.float : (i64) -> !llvm.ptr<float>
+// CHECK-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<f32>
+// CHECK-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+// CHECK-NEXT:  %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<f32> to i64
+// CHECK-NEXT:  %[[allocated:.*]] = llvm.alloca %[[size_bytes]] x f32 : (i64) -> !llvm.ptr<f32>
  %0 = alloca() : memref<32x18xf32>
 
  // Test with explicitly specified alignment. llvm.alloca takes care of the
  // alignment. The same pointer is thus used for allocation and aligned
  // accesses.
- // CHECK: %[[alloca_aligned:.*]] = llvm.alloca %{{.*}} x !llvm.float {alignment = 32 : i64} : (i64) -> !llvm.ptr<float>
- // CHECK: %[[desc:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: %[[desc1:.*]] = llvm.insertvalue %[[alloca_aligned]], %[[desc]][0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: llvm.insertvalue %[[alloca_aligned]], %[[desc1]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+ // CHECK: %[[alloca_aligned:.*]] = llvm.alloca %{{.*}} x f32 {alignment = 32 : i64} : (i64) -> !llvm.ptr<f32>
+ // CHECK: %[[desc:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+ // CHECK: %[[desc1:.*]] = llvm.insertvalue %[[alloca_aligned]], %[[desc]][0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+ // CHECK: llvm.insertvalue %[[alloca_aligned]], %[[desc1]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
  alloca() {alignment = 32} : memref<32x18xf32>
  return %0 : memref<32x18xf32>
 }
@@ -215,14 +215,14 @@ func @static_alloca() -> memref<32x18xf32> {
 // -----
 
 // CHECK-LABEL: func @static_dealloc
-// BAREPTR-LABEL: func @static_dealloc(%{{.*}}: !llvm.ptr<float>) {
+// BAREPTR-LABEL: func @static_dealloc(%{{.*}}: !llvm.ptr<f32>) {
 func @static_dealloc(%static: memref<10x8xf32>) {
-//      CHECK:  %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// CHECK-NEXT:  %[[bc:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr<float> to !llvm.ptr<i8>
+//      CHECK:  %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+// CHECK-NEXT:  %[[bc:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr<f32> to !llvm.ptr<i8>
 // CHECK-NEXT:  llvm.call @free(%[[bc]]) : (!llvm.ptr<i8>) -> ()
 
-// BAREPTR:      %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-// BAREPTR-NEXT: %[[bc:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr<float> to !llvm.ptr<i8>
+// BAREPTR:      %[[ptr:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR-NEXT: %[[bc:.*]] = llvm.bitcast %[[ptr]] : !llvm.ptr<f32> to !llvm.ptr<i8>
 // BAREPTR-NEXT: llvm.call @free(%[[bc]]) : (!llvm.ptr<i8>) -> ()
   dealloc %static : memref<10x8xf32>
   return
@@ -231,13 +231,13 @@ func @static_dealloc(%static: memref<10x8xf32>) {
 // -----
 
 // CHECK-LABEL: func @zero_d_load
-// BAREPTR-LABEL: func @zero_d_load(%{{.*}}: !llvm.ptr<float>) -> !llvm.float
+// BAREPTR-LABEL: func @zero_d_load(%{{.*}}: !llvm.ptr<f32>) -> f32
 func @zero_d_load(%arg0: memref<f32>) -> f32 {
-//      CHECK:  %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-// CHECK-NEXT:  %{{.*}} = llvm.load %[[ptr]] : !llvm.ptr<float>
+//      CHECK:  %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+// CHECK-NEXT:  %{{.*}} = llvm.load %[[ptr]] : !llvm.ptr<f32>
 
-// BAREPTR:      %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-// BAREPTR-NEXT: llvm.load %[[ptr:.*]] : !llvm.ptr<float>
+// BAREPTR:      %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+// BAREPTR-NEXT: llvm.load %[[ptr:.*]] : !llvm.ptr<f32>
   %0 = load %arg0[] : memref<f32>
   return %0 : f32
 }
@@ -245,26 +245,26 @@ func @zero_d_load(%arg0: memref<f32>) -> f32 {
 // -----
 
 // CHECK-LABEL: func @static_load(
-// CHECK-COUNT-2: !llvm.ptr<float>,
+// CHECK-COUNT-2: !llvm.ptr<f32>,
 // CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: i64
 // CHECK:         %[[I:.*]]: i64,
 // CHECK:         %[[J:.*]]: i64)
 // BAREPTR-LABEL: func @static_load
-// BAREPTR-SAME: (%[[A:.*]]: !llvm.ptr<float>, %[[I:.*]]: i64, %[[J:.*]]: i64) {
+// BAREPTR-SAME: (%[[A:.*]]: !llvm.ptr<f32>, %[[I:.*]]: i64, %[[J:.*]]: i64) {
 func @static_load(%static : memref<10x42xf32>, %i : index, %j : index) {
-//       CHECK:  %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:  %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 //  CHECK-NEXT:  %[[st0:.*]] = llvm.mlir.constant(42 : index) : i64
 //  CHECK-NEXT:  %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
 //  CHECK-NEXT:  %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
-//  CHECK-NEXT:  %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-// CHECK-NEXT:  llvm.load %[[addr]] : !llvm.ptr<float>
+//  CHECK-NEXT:  %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+// CHECK-NEXT:  llvm.load %[[addr]] : !llvm.ptr<f32>
 
-// BAREPTR:      %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR:      %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // BAREPTR-NEXT: %[[st0:.*]] = llvm.mlir.constant(42 : index) : i64
 // BAREPTR-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
 // BAREPTR-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
-// BAREPTR-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-// BAREPTR-NEXT: llvm.load %[[addr]] : !llvm.ptr<float>
+// BAREPTR-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+// BAREPTR-NEXT: llvm.load %[[addr]] : !llvm.ptr<f32>
   %0 = load %static[%i, %j] : memref<10x42xf32>
   return
 }
@@ -273,13 +273,13 @@ func @static_load(%static : memref<10x42xf32>, %i : index, %j : index) {
 
 // CHECK-LABEL: func @zero_d_store
 // BAREPTR-LABEL: func @zero_d_store
-// BAREPTR-SAME: (%[[A:.*]]: !llvm.ptr<float>, %[[val:.*]]: !llvm.float)
+// BAREPTR-SAME: (%[[A:.*]]: !llvm.ptr<f32>, %[[val:.*]]: f32)
 func @zero_d_store(%arg0: memref<f32>, %arg1: f32) {
-//      CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-// CHECK-NEXT:  llvm.store %{{.*}}, %[[ptr]] : !llvm.ptr<float>
+//      CHECK:  %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+// CHECK-NEXT:  llvm.store %{{.*}}, %[[ptr]] : !llvm.ptr<f32>
 
-// BAREPTR:      %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-// BAREPTR-NEXT: llvm.store %[[val]], %[[ptr]] : !llvm.ptr<float>
+// BAREPTR:      %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+// BAREPTR-NEXT: llvm.store %[[val]], %[[ptr]] : !llvm.ptr<f32>
   store %arg1, %arg0[] : memref<f32>
   return
 }
@@ -287,8 +287,8 @@ func @zero_d_store(%arg0: memref<f32>, %arg1: f32) {
 // -----
 
 // CHECK-LABEL: func @static_store
-// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<float>
-// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<float>
+// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<f32>
+// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<f32>
 // CHECK-SAME:         %[[ARG2:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[ARG3:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[ARG4:[a-zA-Z0-9]*]]: i64
@@ -297,23 +297,23 @@ func @zero_d_store(%arg0: memref<f32>, %arg1: f32) {
 // CHECK-SAME:         %[[I:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[J:[a-zA-Z0-9]*]]: i64
 // BAREPTR-LABEL: func @static_store
-// BAREPTR-SAME: %[[A:.*]]: !llvm.ptr<float>
+// BAREPTR-SAME: %[[A:.*]]: !llvm.ptr<f32>
 // BAREPTR-SAME:         %[[I:[a-zA-Z0-9]*]]: i64
 // BAREPTR-SAME:         %[[J:[a-zA-Z0-9]*]]: i64
 func @static_store(%static : memref<10x42xf32>, %i : index, %j : index, %val : f32) {
-//       CHECK:  %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:  %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 //  CHECK-NEXT:  %[[st0:.*]] = llvm.mlir.constant(42 : index) : i64
 //  CHECK-NEXT:  %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
 //  CHECK-NEXT:  %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
-//  CHECK-NEXT:  %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-//  CHECK-NEXT:  llvm.store %{{.*}}, %[[addr]] : !llvm.ptr<float>
+//  CHECK-NEXT:  %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+//  CHECK-NEXT:  llvm.store %{{.*}}, %[[addr]] : !llvm.ptr<f32>
 
-// BAREPTR:      %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+// BAREPTR:      %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 // BAREPTR-NEXT: %[[st0:.*]] = llvm.mlir.constant(42 : index) : i64
 // BAREPTR-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
 // BAREPTR-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
-// BAREPTR-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-// BAREPTR-NEXT: llvm.store %{{.*}}, %[[addr]] : !llvm.ptr<float>
+// BAREPTR-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+// BAREPTR-NEXT: llvm.store %{{.*}}, %[[addr]] : !llvm.ptr<f32>
   store %val, %static[%i, %j] : memref<10x42xf32>
   return
 }
@@ -321,10 +321,10 @@ func @static_store(%static : memref<10x42xf32>, %i : index, %j : index, %val : f
 // -----
 
 // CHECK-LABEL: func @static_memref_dim
-// BAREPTR-LABEL: func @static_memref_dim(%{{.*}}: !llvm.ptr<float>) {
+// BAREPTR-LABEL: func @static_memref_dim(%{{.*}}: !llvm.ptr<f32>) {
 func @static_memref_dim(%static : memref<42x32x15x13x27xf32>) {
 // CHECK:        llvm.mlir.constant(42 : index) : i64
-// BAREPTR:      llvm.insertvalue %{{.*}}, %{{.*}}[4, 4] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
+// BAREPTR:      llvm.insertvalue %{{.*}}, %{{.*}}[4, 4] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
 // BAREPTR: llvm.mlir.constant(42 : index) : i64
   %c0 = constant 0 : index
   %0 = dim %static, %c0 : memref<42x32x15x13x27xf32>
@@ -375,13 +375,13 @@ func @check_memref_func_call(%in : memref<10xi8>) -> memref<20xi8> {
 
 // -----
 
-// BAREPTR: llvm.func @goo(!llvm.float) -> !llvm.float
+// BAREPTR: llvm.func @goo(f32) -> f32
 func private @goo(f32) -> f32
 
 // BAREPTR-LABEL: func @check_scalar_func_call
-// BAREPTR-SAME:    %[[in:.*]]: !llvm.float)
+// BAREPTR-SAME:    %[[in:.*]]: f32)
 func @check_scalar_func_call(%in : f32) {
-  // BAREPTR-NEXT:    %[[call:.*]] = llvm.call @goo(%[[in]]) : (!llvm.float) -> !llvm.float
+  // BAREPTR-NEXT:    %[[call:.*]] = llvm.call @goo(%[[in]]) : (f32) -> f32
   %res = call @goo(%in) : (f32) -> (f32)
   return
 }

diff  --git a/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
index e43db3ca55bc..304d62c3935d 100644
--- a/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/convert-to-llvmir.mlir
@@ -66,13 +66,13 @@ func @simple_loop() {
 }
 
 // CHECK-LABEL: llvm.func @complex_numbers()
-// CHECK-NEXT:    %[[REAL0:.*]] = llvm.mlir.constant(1.200000e+00 : f32) : !llvm.float
-// CHECK-NEXT:    %[[IMAG0:.*]] = llvm.mlir.constant(3.400000e+00 : f32) : !llvm.float
-// CHECK-NEXT:    %[[CPLX0:.*]] = llvm.mlir.undef : !llvm.struct<(float, float)>
-// CHECK-NEXT:    %[[CPLX1:.*]] = llvm.insertvalue %[[REAL0]], %[[CPLX0]][0] : !llvm.struct<(float, float)>
-// CHECK-NEXT:    %[[CPLX2:.*]] = llvm.insertvalue %[[IMAG0]], %[[CPLX1]][1] : !llvm.struct<(float, float)>
-// CHECK-NEXT:    %[[REAL1:.*]] = llvm.extractvalue %[[CPLX2:.*]][0] : !llvm.struct<(float, float)>
-// CHECK-NEXT:    %[[IMAG1:.*]] = llvm.extractvalue %[[CPLX2:.*]][1] : !llvm.struct<(float, float)>
+// CHECK-NEXT:    %[[REAL0:.*]] = llvm.mlir.constant(1.200000e+00 : f32) : f32
+// CHECK-NEXT:    %[[IMAG0:.*]] = llvm.mlir.constant(3.400000e+00 : f32) : f32
+// CHECK-NEXT:    %[[CPLX0:.*]] = llvm.mlir.undef : !llvm.struct<(f32, f32)>
+// CHECK-NEXT:    %[[CPLX1:.*]] = llvm.insertvalue %[[REAL0]], %[[CPLX0]][0] : !llvm.struct<(f32, f32)>
+// CHECK-NEXT:    %[[CPLX2:.*]] = llvm.insertvalue %[[IMAG0]], %[[CPLX1]][1] : !llvm.struct<(f32, f32)>
+// CHECK-NEXT:    %[[REAL1:.*]] = llvm.extractvalue %[[CPLX2:.*]][0] : !llvm.struct<(f32, f32)>
+// CHECK-NEXT:    %[[IMAG1:.*]] = llvm.extractvalue %[[CPLX2:.*]][1] : !llvm.struct<(f32, f32)>
 // CHECK-NEXT:    llvm.return
 func @complex_numbers() {
   %real0 = constant 1.2 : f32
@@ -84,15 +84,15 @@ func @complex_numbers() {
 }
 
 // CHECK-LABEL: llvm.func @complex_addition()
-// CHECK-DAG:     %[[A_REAL:.*]] = llvm.extractvalue %[[A:.*]][0] : !llvm.struct<(double, double)>
-// CHECK-DAG:     %[[B_REAL:.*]] = llvm.extractvalue %[[B:.*]][0] : !llvm.struct<(double, double)>
-// CHECK-DAG:     %[[A_IMAG:.*]] = llvm.extractvalue %[[A]][1] : !llvm.struct<(double, double)>
-// CHECK-DAG:     %[[B_IMAG:.*]] = llvm.extractvalue %[[B]][1] : !llvm.struct<(double, double)>
-// CHECK:         %[[C0:.*]] = llvm.mlir.undef : !llvm.struct<(double, double)>
-// CHECK-DAG:     %[[C_REAL:.*]] = llvm.fadd %[[A_REAL]], %[[B_REAL]] : !llvm.double
-// CHECK-DAG:     %[[C_IMAG:.*]] = llvm.fadd %[[A_IMAG]], %[[B_IMAG]] : !llvm.double
-// CHECK:         %[[C1:.*]] = llvm.insertvalue %[[C_REAL]], %[[C0]][0] : !llvm.struct<(double, double)>
-// CHECK:         %[[C2:.*]] = llvm.insertvalue %[[C_IMAG]], %[[C1]][1] : !llvm.struct<(double, double)>
+// CHECK-DAG:     %[[A_REAL:.*]] = llvm.extractvalue %[[A:.*]][0] : !llvm.struct<(f64, f64)>
+// CHECK-DAG:     %[[B_REAL:.*]] = llvm.extractvalue %[[B:.*]][0] : !llvm.struct<(f64, f64)>
+// CHECK-DAG:     %[[A_IMAG:.*]] = llvm.extractvalue %[[A]][1] : !llvm.struct<(f64, f64)>
+// CHECK-DAG:     %[[B_IMAG:.*]] = llvm.extractvalue %[[B]][1] : !llvm.struct<(f64, f64)>
+// CHECK:         %[[C0:.*]] = llvm.mlir.undef : !llvm.struct<(f64, f64)>
+// CHECK-DAG:     %[[C_REAL:.*]] = llvm.fadd %[[A_REAL]], %[[B_REAL]] : f64
+// CHECK-DAG:     %[[C_IMAG:.*]] = llvm.fadd %[[A_IMAG]], %[[B_IMAG]] : f64
+// CHECK:         %[[C1:.*]] = llvm.insertvalue %[[C_REAL]], %[[C0]][0] : !llvm.struct<(f64, f64)>
+// CHECK:         %[[C2:.*]] = llvm.insertvalue %[[C_IMAG]], %[[C1]][1] : !llvm.struct<(f64, f64)>
 func @complex_addition() {
   %a_re = constant 1.2 : f64
   %a_im = constant 3.4 : f64
@@ -105,15 +105,15 @@ func @complex_addition() {
 }
 
 // CHECK-LABEL: llvm.func @complex_substraction()
-// CHECK-DAG:     %[[A_REAL:.*]] = llvm.extractvalue %[[A:.*]][0] : !llvm.struct<(double, double)>
-// CHECK-DAG:     %[[B_REAL:.*]] = llvm.extractvalue %[[B:.*]][0] : !llvm.struct<(double, double)>
-// CHECK-DAG:     %[[A_IMAG:.*]] = llvm.extractvalue %[[A]][1] : !llvm.struct<(double, double)>
-// CHECK-DAG:     %[[B_IMAG:.*]] = llvm.extractvalue %[[B]][1] : !llvm.struct<(double, double)>
-// CHECK:         %[[C0:.*]] = llvm.mlir.undef : !llvm.struct<(double, double)>
-// CHECK-DAG:     %[[C_REAL:.*]] = llvm.fsub %[[A_REAL]], %[[B_REAL]] : !llvm.double
-// CHECK-DAG:     %[[C_IMAG:.*]] = llvm.fsub %[[A_IMAG]], %[[B_IMAG]] : !llvm.double
-// CHECK:         %[[C1:.*]] = llvm.insertvalue %[[C_REAL]], %[[C0]][0] : !llvm.struct<(double, double)>
-// CHECK:         %[[C2:.*]] = llvm.insertvalue %[[C_IMAG]], %[[C1]][1] : !llvm.struct<(double, double)>
+// CHECK-DAG:     %[[A_REAL:.*]] = llvm.extractvalue %[[A:.*]][0] : !llvm.struct<(f64, f64)>
+// CHECK-DAG:     %[[B_REAL:.*]] = llvm.extractvalue %[[B:.*]][0] : !llvm.struct<(f64, f64)>
+// CHECK-DAG:     %[[A_IMAG:.*]] = llvm.extractvalue %[[A]][1] : !llvm.struct<(f64, f64)>
+// CHECK-DAG:     %[[B_IMAG:.*]] = llvm.extractvalue %[[B]][1] : !llvm.struct<(f64, f64)>
+// CHECK:         %[[C0:.*]] = llvm.mlir.undef : !llvm.struct<(f64, f64)>
+// CHECK-DAG:     %[[C_REAL:.*]] = llvm.fsub %[[A_REAL]], %[[B_REAL]] : f64
+// CHECK-DAG:     %[[C_IMAG:.*]] = llvm.fsub %[[A_IMAG]], %[[B_IMAG]] : f64
+// CHECK:         %[[C1:.*]] = llvm.insertvalue %[[C_REAL]], %[[C0]][0] : !llvm.struct<(f64, f64)>
+// CHECK:         %[[C2:.*]] = llvm.insertvalue %[[C_IMAG]], %[[C1]][1] : !llvm.struct<(f64, f64)>
 func @complex_substraction() {
   %a_re = constant 1.2 : f64
   %a_im = constant 3.4 : f64
@@ -425,41 +425,41 @@ func @more_imperfectly_nested_loops() {
 
 // CHECK-LABEL: llvm.func @get_i64() -> i64
 func private @get_i64() -> (i64)
-// CHECK-LABEL: llvm.func @get_f32() -> !llvm.float
+// CHECK-LABEL: llvm.func @get_f32() -> f32
 func private @get_f32() -> (f32)
-// CHECK-LABEL: llvm.func @get_c16() -> !llvm.struct<(half, half)>
+// CHECK-LABEL: llvm.func @get_c16() -> !llvm.struct<(f16, f16)>
 func private @get_c16() -> (complex<f16>)
-// CHECK-LABEL: llvm.func @get_c32() -> !llvm.struct<(float, float)>
+// CHECK-LABEL: llvm.func @get_c32() -> !llvm.struct<(f32, f32)>
 func private @get_c32() -> (complex<f32>)
-// CHECK-LABEL: llvm.func @get_c64() -> !llvm.struct<(double, double)>
+// CHECK-LABEL: llvm.func @get_c64() -> !llvm.struct<(f64, f64)>
 func private @get_c64() -> (complex<f64>)
-// CHECK-LABEL: llvm.func @get_memref() -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<4 x i64>, array<4 x i64>)>
-// CHECK32-LABEL: llvm.func @get_memref() -> !llvm.struct<(ptr<float>, ptr<float>, i32, array<4 x i32>, array<4 x i32>)>
+// CHECK-LABEL: llvm.func @get_memref() -> !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>
+// CHECK32-LABEL: llvm.func @get_memref() -> !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<4 x i32>, array<4 x i32>)>
 func private @get_memref() -> (memref<42x?x10x?xf32>)
 
-// CHECK-LABEL: llvm.func @multireturn() -> !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i64, array<4 x i64>, array<4 x i64>)>)> {
-// CHECK32-LABEL: llvm.func @multireturn() -> !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i32, array<4 x i32>, array<4 x i32>)>)> {
+// CHECK-LABEL: llvm.func @multireturn() -> !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)> {
+// CHECK32-LABEL: llvm.func @multireturn() -> !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i32, array<4 x i32>, array<4 x i32>)>)> {
 func @multireturn() -> (i64, f32, memref<42x?x10x?xf32>) {
 ^bb0:
 // CHECK-NEXT:  {{.*}} = llvm.call @get_i64() : () -> i64
-// CHECK-NEXT:  {{.*}} = llvm.call @get_f32() : () -> !llvm.float
-// CHECK-NEXT:  {{.*}} = llvm.call @get_memref() : () -> !llvm.struct<(ptr<float>, ptr<float>, i64, array<4 x i64>, array<4 x i64>)>
+// CHECK-NEXT:  {{.*}} = llvm.call @get_f32() : () -> f32
+// CHECK-NEXT:  {{.*}} = llvm.call @get_memref() : () -> !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>
 // CHECK32-NEXT:  {{.*}} = llvm.call @get_i64() : () -> i64
-// CHECK32-NEXT:  {{.*}} = llvm.call @get_f32() : () -> !llvm.float
-// CHECK32-NEXT:  {{.*}} = llvm.call @get_memref() : () -> !llvm.struct<(ptr<float>, ptr<float>, i32, array<4 x i32>, array<4 x i32>)>
+// CHECK32-NEXT:  {{.*}} = llvm.call @get_f32() : () -> f32
+// CHECK32-NEXT:  {{.*}} = llvm.call @get_memref() : () -> !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<4 x i32>, array<4 x i32>)>
   %0 = call @get_i64() : () -> (i64)
   %1 = call @get_f32() : () -> (f32)
   %2 = call @get_memref() : () -> (memref<42x?x10x?xf32>)
-// CHECK-NEXT:  {{.*}} = llvm.mlir.undef : !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i64, array<4 x i64>, array<4 x i64>)>)>
-// CHECK-NEXT:  {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[0] : !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i64, array<4 x i64>, array<4 x i64>)>)>
-// CHECK-NEXT:  {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[1] : !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i64, array<4 x i64>, array<4 x i64>)>)>
-// CHECK-NEXT:  {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[2] : !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i64, array<4 x i64>, array<4 x i64>)>)>
-// CHECK-NEXT:  llvm.return {{.*}} : !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i64, array<4 x i64>, array<4 x i64>)>)>
-// CHECK32-NEXT:  {{.*}} = llvm.mlir.undef : !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i32, array<4 x i32>, array<4 x i32>)>)>
-// CHECK32-NEXT:  {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[0] : !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i32, array<4 x i32>, array<4 x i32>)>)>
-// CHECK32-NEXT:  {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[1] : !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i32, array<4 x i32>, array<4 x i32>)>)>
-// CHECK32-NEXT:  {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[2] : !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i32, array<4 x i32>, array<4 x i32>)>)>
-// CHECK32-NEXT:  llvm.return {{.*}} : !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i32, array<4 x i32>, array<4 x i32>)>)>
+// CHECK-NEXT:  {{.*}} = llvm.mlir.undef : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)>
+// CHECK-NEXT:  {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[0] : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)>
+// CHECK-NEXT:  {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[1] : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)>
+// CHECK-NEXT:  {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[2] : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)>
+// CHECK-NEXT:  llvm.return {{.*}} : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)>
+// CHECK32-NEXT:  {{.*}} = llvm.mlir.undef : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i32, array<4 x i32>, array<4 x i32>)>)>
+// CHECK32-NEXT:  {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[0] : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i32, array<4 x i32>, array<4 x i32>)>)>
+// CHECK32-NEXT:  {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[1] : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i32, array<4 x i32>, array<4 x i32>)>)>
+// CHECK32-NEXT:  {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[2] : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i32, array<4 x i32>, array<4 x i32>)>)>
+// CHECK32-NEXT:  llvm.return {{.*}} : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i32, array<4 x i32>, array<4 x i32>)>)>
   return %0, %1, %2 : i64, f32, memref<42x?x10x?xf32>
 }
 
@@ -468,30 +468,30 @@ func @multireturn() -> (i64, f32, memref<42x?x10x?xf32>) {
 // CHECK32-LABEL: llvm.func @multireturn_caller() {
 func @multireturn_caller() {
 ^bb0:
-// CHECK-NEXT:  {{.*}} = llvm.call @multireturn() : () -> !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i64, array<4 x i64>, array<4 x i64>)>)>
-// CHECK-NEXT:  {{.*}} = llvm.extractvalue {{.*}}[0] : !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i64, array<4 x i64>, array<4 x i64>)>)>
-// CHECK-NEXT:  {{.*}} = llvm.extractvalue {{.*}}[1] : !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i64, array<4 x i64>, array<4 x i64>)>)>
-// CHECK-NEXT:  {{.*}} = llvm.extractvalue {{.*}}[2] : !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i64, array<4 x i64>, array<4 x i64>)>)>
-// CHECK32-NEXT:  {{.*}} = llvm.call @multireturn() : () -> !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i32, array<4 x i32>, array<4 x i32>)>)>
-// CHECK32-NEXT:  {{.*}} = llvm.extractvalue {{.*}}[0] : !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i32, array<4 x i32>, array<4 x i32>)>)>
-// CHECK32-NEXT:  {{.*}} = llvm.extractvalue {{.*}}[1] : !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i32, array<4 x i32>, array<4 x i32>)>)>
-// CHECK32-NEXT:  {{.*}} = llvm.extractvalue {{.*}}[2] : !llvm.struct<(i64, float, struct<(ptr<float>, ptr<float>, i32, array<4 x i32>, array<4 x i32>)>)>
+// CHECK-NEXT:  {{.*}} = llvm.call @multireturn() : () -> !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)>
+// CHECK-NEXT:  {{.*}} = llvm.extractvalue {{.*}}[0] : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)>
+// CHECK-NEXT:  {{.*}} = llvm.extractvalue {{.*}}[1] : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)>
+// CHECK-NEXT:  {{.*}} = llvm.extractvalue {{.*}}[2] : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)>
+// CHECK32-NEXT:  {{.*}} = llvm.call @multireturn() : () -> !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i32, array<4 x i32>, array<4 x i32>)>)>
+// CHECK32-NEXT:  {{.*}} = llvm.extractvalue {{.*}}[0] : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i32, array<4 x i32>, array<4 x i32>)>)>
+// CHECK32-NEXT:  {{.*}} = llvm.extractvalue {{.*}}[1] : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i32, array<4 x i32>, array<4 x i32>)>)>
+// CHECK32-NEXT:  {{.*}} = llvm.extractvalue {{.*}}[2] : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i32, array<4 x i32>, array<4 x i32>)>)>
   %0:3 = call @multireturn() : () -> (i64, f32, memref<42x?x10x?xf32>)
   %1 = constant 42 : i64
 // CHECK:       {{.*}} = llvm.add {{.*}}, {{.*}} : i64
   %2 = addi %0#0, %1 : i64
   %3 = constant 42.0 : f32
-// CHECK:       {{.*}} = llvm.fadd {{.*}}, {{.*}} : !llvm.float
+// CHECK:       {{.*}} = llvm.fadd {{.*}}, {{.*}} : f32
   %4 = addf %0#1, %3 : f32
   %5 = constant 0 : index
   return
 }
 
-// CHECK-LABEL: llvm.func @vector_ops(%arg0: !llvm.vec<4 x float>, %arg1: !llvm.vec<4 x i1>, %arg2: !llvm.vec<4 x i64>, %arg3: !llvm.vec<4 x i64>) -> !llvm.vec<4 x float> {
+// CHECK-LABEL: llvm.func @vector_ops(%arg0: !llvm.vec<4 x f32>, %arg1: !llvm.vec<4 x i1>, %arg2: !llvm.vec<4 x i64>, %arg3: !llvm.vec<4 x i64>) -> !llvm.vec<4 x f32> {
 func @vector_ops(%arg0: vector<4xf32>, %arg1: vector<4xi1>, %arg2: vector<4xi64>, %arg3: vector<4xi64>) -> vector<4xf32> {
-// CHECK-NEXT:  %0 = llvm.mlir.constant(dense<4.200000e+01> : vector<4xf32>) : !llvm.vec<4 x float>
+// CHECK-NEXT:  %0 = llvm.mlir.constant(dense<4.200000e+01> : vector<4xf32>) : !llvm.vec<4 x f32>
   %0 = constant dense<42.> : vector<4xf32>
-// CHECK-NEXT:  %1 = llvm.fadd %arg0, %0 : !llvm.vec<4 x float>
+// CHECK-NEXT:  %1 = llvm.fadd %arg0, %0 : !llvm.vec<4 x f32>
   %1 = addf %arg0, %0 : vector<4xf32>
 // CHECK-NEXT:  %2 = llvm.sdiv %arg2, %arg2 : !llvm.vec<4 x i64>
   %3 = divi_signed %arg2, %arg2 : vector<4xi64>
@@ -501,9 +501,9 @@ func @vector_ops(%arg0: vector<4xf32>, %arg1: vector<4xi1>, %arg2: vector<4xi64>
   %5 = remi_signed %arg2, %arg2 : vector<4xi64>
 // CHECK-NEXT:  %5 = llvm.urem %arg2, %arg2 : !llvm.vec<4 x i64>
   %6 = remi_unsigned %arg2, %arg2 : vector<4xi64>
-// CHECK-NEXT:  %6 = llvm.fdiv %arg0, %0 : !llvm.vec<4 x float>
+// CHECK-NEXT:  %6 = llvm.fdiv %arg0, %0 : !llvm.vec<4 x f32>
   %7 = divf %arg0, %0 : vector<4xf32>
-// CHECK-NEXT:  %7 = llvm.frem %arg0, %0 : !llvm.vec<4 x float>
+// CHECK-NEXT:  %7 = llvm.frem %arg0, %0 : !llvm.vec<4 x f32>
   %8 = remf %arg0, %0 : vector<4xf32>
 // CHECK-NEXT:  %8 = llvm.and %arg2, %arg3 : !llvm.vec<4 x i64>
   %9 = and %arg2, %arg3 : vector<4xi64>
@@ -523,7 +523,7 @@ func @vector_ops(%arg0: vector<4xf32>, %arg1: vector<4xi1>, %arg2: vector<4xi64>
 // CHECK-LABEL: @ops
 func @ops(f32, f32, i32, i32, f64) -> (f32, i32) {
 ^bb0(%arg0: f32, %arg1: f32, %arg2: i32, %arg3: i32, %arg4: f64):
-// CHECK-NEXT:  %0 = llvm.fsub %arg0, %arg1 : !llvm.float
+// CHECK-NEXT:  %0 = llvm.fsub %arg0, %arg1 : f32
   %0 = subf %arg0, %arg1: f32
 // CHECK-NEXT:  %1 = llvm.sub %arg2, %arg3 : i32
   %1 = subi %arg2, %arg3: i32
@@ -539,9 +539,9 @@ func @ops(f32, f32, i32, i32, f64) -> (f32, i32) {
   %6 = remi_unsigned %arg2, %arg3 : i32
 // CHECK-NEXT:  %7 = llvm.select %2, %arg2, %arg3 : i1, i32
   %7 = select %2, %arg2, %arg3 : i32
-// CHECK-NEXT:  %8 = llvm.fdiv %arg0, %arg1 : !llvm.float
+// CHECK-NEXT:  %8 = llvm.fdiv %arg0, %arg1 : f32
   %8 = divf %arg0, %arg1 : f32
-// CHECK-NEXT:  %9 = llvm.frem %arg0, %arg1 : !llvm.float
+// CHECK-NEXT:  %9 = llvm.frem %arg0, %arg1 : f32
   %9 = remf %arg0, %arg1 : f32
 // CHECK-NEXT: %10 = llvm.and %arg2, %arg3 : i32
   %10 = and %arg2, %arg3 : i32
@@ -549,11 +549,11 @@ func @ops(f32, f32, i32, i32, f64) -> (f32, i32) {
   %11 = or %arg2, %arg3 : i32
 // CHECK-NEXT: %12 = llvm.xor %arg2, %arg3 : i32
   %12 = xor %arg2, %arg3 : i32
-// CHECK-NEXT: %13 = "llvm.intr.exp"(%arg0) : (!llvm.float) -> !llvm.float
+// CHECK-NEXT: %13 = "llvm.intr.exp"(%arg0) : (f32) -> f32
   %13 = std.exp %arg0 : f32
-// CHECK-NEXT: %14 = "llvm.intr.exp2"(%arg0) : (!llvm.float) -> !llvm.float
+// CHECK-NEXT: %14 = "llvm.intr.exp2"(%arg0) : (f32) -> f32
   %14 = std.exp2 %arg0 : f32
-// CHECK-NEXT: %15 = llvm.mlir.constant(7.900000e-01 : f64) : !llvm.double
+// CHECK-NEXT: %15 = llvm.mlir.constant(7.900000e-01 : f64) : f64
   %15 = constant 7.9e-01 : f64
 // CHECK-NEXT: %16 = llvm.shl %arg2, %arg3 : i32
   %16 = shift_left %arg2, %arg3 : i32
@@ -561,9 +561,9 @@ func @ops(f32, f32, i32, i32, f64) -> (f32, i32) {
   %17 = shift_right_signed %arg2, %arg3 : i32
 // CHECK-NEXT: %18 = llvm.lshr %arg2, %arg3 : i32
   %18 = shift_right_unsigned %arg2, %arg3 : i32
-// CHECK-NEXT: %{{[0-9]+}} = "llvm.intr.sqrt"(%arg0) : (!llvm.float) -> !llvm.float
+// CHECK-NEXT: %{{[0-9]+}} = "llvm.intr.sqrt"(%arg0) : (f32) -> f32
   %19 = std.sqrt %arg0 : f32
-// CHECK-NEXT: %{{[0-9]+}} = "llvm.intr.sqrt"(%arg4) : (!llvm.double) -> !llvm.double
+// CHECK-NEXT: %{{[0-9]+}} = "llvm.intr.sqrt"(%arg4) : (f64) -> f64
   %20 = std.sqrt %arg4 : f64
   return %0, %4 : f32, i32
 }
@@ -583,13 +583,13 @@ func @index_cast(%arg0: index, %arg1: i1) {
 // Checking conversion of signed integer types to floating point.
 // CHECK-LABEL: @sitofp
 func @sitofp(%arg0 : i32, %arg1 : i64) {
-// CHECK-NEXT: = llvm.sitofp {{.*}} : i32 to !llvm.float
+// CHECK-NEXT: = llvm.sitofp {{.*}} : i32 to f32
   %0 = sitofp %arg0: i32 to f32
-// CHECK-NEXT: = llvm.sitofp {{.*}} : i32 to !llvm.double
+// CHECK-NEXT: = llvm.sitofp {{.*}} : i32 to f64
   %1 = sitofp %arg0: i32 to f64
-// CHECK-NEXT: = llvm.sitofp {{.*}} : i64 to !llvm.float
+// CHECK-NEXT: = llvm.sitofp {{.*}} : i64 to f32
   %2 = sitofp %arg1: i64 to f32
-// CHECK-NEXT: = llvm.sitofp {{.*}} : i64 to !llvm.double
+// CHECK-NEXT: = llvm.sitofp {{.*}} : i64 to f64
   %3 = sitofp %arg1: i64 to f64
   return
 }
@@ -597,17 +597,17 @@ func @sitofp(%arg0 : i32, %arg1 : i64) {
 // Checking conversion of integer vectors to floating point vector types.
 // CHECK-LABEL: @sitofp_vector
 func @sitofp_vector(%arg0 : vector<2xi16>, %arg1 : vector<2xi32>, %arg2 : vector<2xi64>) {
-// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.vec<2 x i16> to !llvm.vec<2 x float>
+// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.vec<2 x i16> to !llvm.vec<2 x f32>
   %0 = sitofp %arg0: vector<2xi16> to vector<2xf32>
-// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.vec<2 x i16> to !llvm.vec<2 x double>
+// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.vec<2 x i16> to !llvm.vec<2 x f64>
   %1 = sitofp %arg0: vector<2xi16> to vector<2xf64>
-// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.vec<2 x i32> to !llvm.vec<2 x float>
+// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.vec<2 x i32> to !llvm.vec<2 x f32>
   %2 = sitofp %arg1: vector<2xi32> to vector<2xf32>
-// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.vec<2 x i32> to !llvm.vec<2 x double>
+// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.vec<2 x i32> to !llvm.vec<2 x f64>
   %3 = sitofp %arg1: vector<2xi32> to vector<2xf64>
-// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.vec<2 x i64> to !llvm.vec<2 x float>
+// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.vec<2 x i64> to !llvm.vec<2 x f32>
   %4 = sitofp %arg2: vector<2xi64> to vector<2xf32>
-// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.vec<2 x i64> to !llvm.vec<2 x double>
+// CHECK-NEXT: = llvm.sitofp {{.*}} : !llvm.vec<2 x i64> to !llvm.vec<2 x f64>
   %5 = sitofp %arg2: vector<2xi64> to vector<2xf64>
   return
 }
@@ -615,13 +615,13 @@ func @sitofp_vector(%arg0 : vector<2xi16>, %arg1 : vector<2xi32>, %arg2 : vector
 // Checking conversion of unsigned integer types to floating point.
 // CHECK-LABEL: @uitofp
 func @uitofp(%arg0 : i32, %arg1 : i64) {
-// CHECK-NEXT: = llvm.uitofp {{.*}} : i32 to !llvm.float
+// CHECK-NEXT: = llvm.uitofp {{.*}} : i32 to f32
   %0 = uitofp %arg0: i32 to f32
-// CHECK-NEXT: = llvm.uitofp {{.*}} : i32 to !llvm.double
+// CHECK-NEXT: = llvm.uitofp {{.*}} : i32 to f64
   %1 = uitofp %arg0: i32 to f64
-// CHECK-NEXT: = llvm.uitofp {{.*}} : i64 to !llvm.float
+// CHECK-NEXT: = llvm.uitofp {{.*}} : i64 to f32
   %2 = uitofp %arg1: i64 to f32
-// CHECK-NEXT: = llvm.uitofp {{.*}} : i64 to !llvm.double
+// CHECK-NEXT: = llvm.uitofp {{.*}} : i64 to f64
   %3 = uitofp %arg1: i64 to f64
   return
 }
@@ -629,11 +629,11 @@ func @uitofp(%arg0 : i32, %arg1 : i64) {
 // Checking conversion of integer types to floating point.
 // CHECK-LABEL: @fpext
 func @fpext(%arg0 : f16, %arg1 : f32) {
-// CHECK-NEXT: = llvm.fpext {{.*}} : !llvm.half to !llvm.float
+// CHECK-NEXT: = llvm.fpext {{.*}} : f16 to f32
   %0 = fpext %arg0: f16 to f32
-// CHECK-NEXT: = llvm.fpext {{.*}} : !llvm.half to !llvm.double
+// CHECK-NEXT: = llvm.fpext {{.*}} : f16 to f64
   %1 = fpext %arg0: f16 to f64
-// CHECK-NEXT: = llvm.fpext {{.*}} : !llvm.float to !llvm.double
+// CHECK-NEXT: = llvm.fpext {{.*}} : f32 to f64
   %2 = fpext %arg1: f32 to f64
   return
 }
@@ -641,11 +641,11 @@ func @fpext(%arg0 : f16, %arg1 : f32) {
 // Checking conversion of integer types to floating point.
 // CHECK-LABEL: @fpext
 func @fpext_vector(%arg0 : vector<2xf16>, %arg1 : vector<2xf32>) {
-// CHECK-NEXT: = llvm.fpext {{.*}} : !llvm.vec<2 x half> to !llvm.vec<2 x float>
+// CHECK-NEXT: = llvm.fpext {{.*}} : !llvm.vec<2 x f16> to !llvm.vec<2 x f32>
   %0 = fpext %arg0: vector<2xf16> to vector<2xf32>
-// CHECK-NEXT: = llvm.fpext {{.*}} : !llvm.vec<2 x half> to !llvm.vec<2 x double>
+// CHECK-NEXT: = llvm.fpext {{.*}} : !llvm.vec<2 x f16> to !llvm.vec<2 x f64>
   %1 = fpext %arg0: vector<2xf16> to vector<2xf64>
-// CHECK-NEXT: = llvm.fpext {{.*}} : !llvm.vec<2 x float> to !llvm.vec<2 x double>
+// CHECK-NEXT: = llvm.fpext {{.*}} : !llvm.vec<2 x f32> to !llvm.vec<2 x f64>
   %2 = fpext %arg1: vector<2xf32> to vector<2xf64>
   return
 }
@@ -653,13 +653,13 @@ func @fpext_vector(%arg0 : vector<2xf16>, %arg1 : vector<2xf32>) {
 // Checking conversion of floating point to integer types.
 // CHECK-LABEL: @fptosi
 func @fptosi(%arg0 : f32, %arg1 : f64) {
-// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.float to i32
+// CHECK-NEXT: = llvm.fptosi {{.*}} : f32 to i32
   %0 = fptosi %arg0: f32 to i32
-// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.float to i64
+// CHECK-NEXT: = llvm.fptosi {{.*}} : f32 to i64
   %1 = fptosi %arg0: f32 to i64
-// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.double to i32
+// CHECK-NEXT: = llvm.fptosi {{.*}} : f64 to i32
   %2 = fptosi %arg1: f64 to i32
-// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.double to i64
+// CHECK-NEXT: = llvm.fptosi {{.*}} : f64 to i64
   %3 = fptosi %arg1: f64 to i64
   return
 }
@@ -667,17 +667,17 @@ func @fptosi(%arg0 : f32, %arg1 : f64) {
 // Checking conversion of floating point vectors to integer vector types.
 // CHECK-LABEL: @fptosi_vector
 func @fptosi_vector(%arg0 : vector<2xf16>, %arg1 : vector<2xf32>, %arg2 : vector<2xf64>) {
-// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.vec<2 x half> to !llvm.vec<2 x i32>
+// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.vec<2 x f16> to !llvm.vec<2 x i32>
   %0 = fptosi %arg0: vector<2xf16> to vector<2xi32>
-// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.vec<2 x half> to !llvm.vec<2 x i64>
+// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.vec<2 x f16> to !llvm.vec<2 x i64>
   %1 = fptosi %arg0: vector<2xf16> to vector<2xi64>
-// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.vec<2 x float> to !llvm.vec<2 x i32>
+// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.vec<2 x f32> to !llvm.vec<2 x i32>
   %2 = fptosi %arg1: vector<2xf32> to vector<2xi32>
-// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.vec<2 x float> to !llvm.vec<2 x i64>
+// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.vec<2 x f32> to !llvm.vec<2 x i64>
   %3 = fptosi %arg1: vector<2xf32> to vector<2xi64>
-// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.vec<2 x double> to !llvm.vec<2 x i32>
+// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.vec<2 x f64> to !llvm.vec<2 x i32>
   %4 = fptosi %arg2: vector<2xf64> to vector<2xi32>
-// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.vec<2 x double> to !llvm.vec<2 x i64>
+// CHECK-NEXT: = llvm.fptosi {{.*}} : !llvm.vec<2 x f64> to !llvm.vec<2 x i64>
   %5 = fptosi %arg2: vector<2xf64> to vector<2xi64>
   return
 }
@@ -685,13 +685,13 @@ func @fptosi_vector(%arg0 : vector<2xf16>, %arg1 : vector<2xf32>, %arg2 : vector
 // Checking conversion of floating point to integer types.
 // CHECK-LABEL: @fptoui
 func @fptoui(%arg0 : f32, %arg1 : f64) {
-// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.float to i32
+// CHECK-NEXT: = llvm.fptoui {{.*}} : f32 to i32
   %0 = fptoui %arg0: f32 to i32
-// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.float to i64
+// CHECK-NEXT: = llvm.fptoui {{.*}} : f32 to i64
   %1 = fptoui %arg0: f32 to i64
-// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.double to i32
+// CHECK-NEXT: = llvm.fptoui {{.*}} : f64 to i32
   %2 = fptoui %arg1: f64 to i32
-// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.double to i64
+// CHECK-NEXT: = llvm.fptoui {{.*}} : f64 to i64
   %3 = fptoui %arg1: f64 to i64
   return
 }
@@ -699,17 +699,17 @@ func @fptoui(%arg0 : f32, %arg1 : f64) {
 // Checking conversion of floating point vectors to integer vector types.
 // CHECK-LABEL: @fptoui_vector
 func @fptoui_vector(%arg0 : vector<2xf16>, %arg1 : vector<2xf32>, %arg2 : vector<2xf64>) {
-// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.vec<2 x half> to !llvm.vec<2 x i32>
+// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.vec<2 x f16> to !llvm.vec<2 x i32>
   %0 = fptoui %arg0: vector<2xf16> to vector<2xi32>
-// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.vec<2 x half> to !llvm.vec<2 x i64>
+// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.vec<2 x f16> to !llvm.vec<2 x i64>
   %1 = fptoui %arg0: vector<2xf16> to vector<2xi64>
-// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.vec<2 x float> to !llvm.vec<2 x i32>
+// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.vec<2 x f32> to !llvm.vec<2 x i32>
   %2 = fptoui %arg1: vector<2xf32> to vector<2xi32>
-// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.vec<2 x float> to !llvm.vec<2 x i64>
+// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.vec<2 x f32> to !llvm.vec<2 x i64>
   %3 = fptoui %arg1: vector<2xf32> to vector<2xi64>
-// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.vec<2 x double> to !llvm.vec<2 x i32>
+// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.vec<2 x f64> to !llvm.vec<2 x i32>
   %4 = fptoui %arg2: vector<2xf64> to vector<2xi32>
-// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.vec<2 x double> to !llvm.vec<2 x i64>
+// CHECK-NEXT: = llvm.fptoui {{.*}} : !llvm.vec<2 x f64> to !llvm.vec<2 x i64>
   %5 = fptoui %arg2: vector<2xf64> to vector<2xi64>
   return
 }
@@ -717,17 +717,17 @@ func @fptoui_vector(%arg0 : vector<2xf16>, %arg1 : vector<2xf32>, %arg2 : vector
 // Checking conversion of integer vectors to floating point vector types.
 // CHECK-LABEL: @uitofp_vector
 func @uitofp_vector(%arg0 : vector<2xi16>, %arg1 : vector<2xi32>, %arg2 : vector<2xi64>) {
-// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.vec<2 x i16> to !llvm.vec<2 x float>
+// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.vec<2 x i16> to !llvm.vec<2 x f32>
   %0 = uitofp %arg0: vector<2xi16> to vector<2xf32>
-// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.vec<2 x i16> to !llvm.vec<2 x double>
+// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.vec<2 x i16> to !llvm.vec<2 x f64>
   %1 = uitofp %arg0: vector<2xi16> to vector<2xf64>
-// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.vec<2 x i32> to !llvm.vec<2 x float>
+// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.vec<2 x i32> to !llvm.vec<2 x f32>
   %2 = uitofp %arg1: vector<2xi32> to vector<2xf32>
-// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.vec<2 x i32> to !llvm.vec<2 x double>
+// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.vec<2 x i32> to !llvm.vec<2 x f64>
   %3 = uitofp %arg1: vector<2xi32> to vector<2xf64>
-// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.vec<2 x i64> to !llvm.vec<2 x float>
+// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.vec<2 x i64> to !llvm.vec<2 x f32>
   %4 = uitofp %arg2: vector<2xi64> to vector<2xf32>
-// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.vec<2 x i64> to !llvm.vec<2 x double>
+// CHECK-NEXT: = llvm.uitofp {{.*}} : !llvm.vec<2 x i64> to !llvm.vec<2 x f64>
   %5 = uitofp %arg2: vector<2xi64> to vector<2xf64>
   return
 }
@@ -735,11 +735,11 @@ func @uitofp_vector(%arg0 : vector<2xi16>, %arg1 : vector<2xi32>, %arg2 : vector
 // Checking conversion of integer types to floating point.
 // CHECK-LABEL: @fptrunc
 func @fptrunc(%arg0 : f32, %arg1 : f64) {
-// CHECK-NEXT: = llvm.fptrunc {{.*}} : !llvm.float to !llvm.half
+// CHECK-NEXT: = llvm.fptrunc {{.*}} : f32 to f16
   %0 = fptrunc %arg0: f32 to f16
-// CHECK-NEXT: = llvm.fptrunc {{.*}} : !llvm.double to !llvm.half
+// CHECK-NEXT: = llvm.fptrunc {{.*}} : f64 to f16
   %1 = fptrunc %arg1: f64 to f16
-// CHECK-NEXT: = llvm.fptrunc {{.*}} : !llvm.double to !llvm.float
+// CHECK-NEXT: = llvm.fptrunc {{.*}} : f64 to f32
   %2 = fptrunc %arg1: f64 to f32
   return
 }
@@ -747,11 +747,11 @@ func @fptrunc(%arg0 : f32, %arg1 : f64) {
 // Checking conversion of integer types to floating point.
 // CHECK-LABEL: @fptrunc
 func @fptrunc_vector(%arg0 : vector<2xf32>, %arg1 : vector<2xf64>) {
-// CHECK-NEXT: = llvm.fptrunc {{.*}} : !llvm.vec<2 x float> to !llvm.vec<2 x half>
+// CHECK-NEXT: = llvm.fptrunc {{.*}} : !llvm.vec<2 x f32> to !llvm.vec<2 x f16>
   %0 = fptrunc %arg0: vector<2xf32> to vector<2xf16>
-// CHECK-NEXT: = llvm.fptrunc {{.*}} : !llvm.vec<2 x double> to !llvm.vec<2 x half>
+// CHECK-NEXT: = llvm.fptrunc {{.*}} : !llvm.vec<2 x f64> to !llvm.vec<2 x f16>
   %1 = fptrunc %arg1: vector<2xf64> to vector<2xf16>
-// CHECK-NEXT: = llvm.fptrunc {{.*}} : !llvm.vec<2 x double> to !llvm.vec<2 x float>
+// CHECK-NEXT: = llvm.fptrunc {{.*}} : !llvm.vec<2 x f64> to !llvm.vec<2 x f32>
   %2 = fptrunc %arg1: vector<2xf64> to vector<2xf32>
   return
 }
@@ -790,23 +790,23 @@ func @dfs_block_order(%arg0: i32) -> (i32) {
   br ^bb1
 }
 
-// CHECK-LABEL: func @fcmp(%arg0: !llvm.float, %arg1: !llvm.float) {
+// CHECK-LABEL: func @fcmp(%arg0: f32, %arg1: f32) {
 func @fcmp(f32, f32) -> () {
 ^bb0(%arg0: f32, %arg1: f32):
-  // CHECK:      llvm.fcmp "oeq" %arg0, %arg1 : !llvm.float
-  // CHECK-NEXT: llvm.fcmp "ogt" %arg0, %arg1 : !llvm.float
-  // CHECK-NEXT: llvm.fcmp "oge" %arg0, %arg1 : !llvm.float
-  // CHECK-NEXT: llvm.fcmp "olt" %arg0, %arg1 : !llvm.float
-  // CHECK-NEXT: llvm.fcmp "ole" %arg0, %arg1 : !llvm.float
-  // CHECK-NEXT: llvm.fcmp "one" %arg0, %arg1 : !llvm.float
-  // CHECK-NEXT: llvm.fcmp "ord" %arg0, %arg1 : !llvm.float
-  // CHECK-NEXT: llvm.fcmp "ueq" %arg0, %arg1 : !llvm.float
-  // CHECK-NEXT: llvm.fcmp "ugt" %arg0, %arg1 : !llvm.float
-  // CHECK-NEXT: llvm.fcmp "uge" %arg0, %arg1 : !llvm.float
-  // CHECK-NEXT: llvm.fcmp "ult" %arg0, %arg1 : !llvm.float
-  // CHECK-NEXT: llvm.fcmp "ule" %arg0, %arg1 : !llvm.float
-  // CHECK-NEXT: llvm.fcmp "une" %arg0, %arg1 : !llvm.float
-  // CHECK-NEXT: llvm.fcmp "uno" %arg0, %arg1 : !llvm.float
+  // CHECK:      llvm.fcmp "oeq" %arg0, %arg1 : f32
+  // CHECK-NEXT: llvm.fcmp "ogt" %arg0, %arg1 : f32
+  // CHECK-NEXT: llvm.fcmp "oge" %arg0, %arg1 : f32
+  // CHECK-NEXT: llvm.fcmp "olt" %arg0, %arg1 : f32
+  // CHECK-NEXT: llvm.fcmp "ole" %arg0, %arg1 : f32
+  // CHECK-NEXT: llvm.fcmp "one" %arg0, %arg1 : f32
+  // CHECK-NEXT: llvm.fcmp "ord" %arg0, %arg1 : f32
+  // CHECK-NEXT: llvm.fcmp "ueq" %arg0, %arg1 : f32
+  // CHECK-NEXT: llvm.fcmp "ugt" %arg0, %arg1 : f32
+  // CHECK-NEXT: llvm.fcmp "uge" %arg0, %arg1 : f32
+  // CHECK-NEXT: llvm.fcmp "ult" %arg0, %arg1 : f32
+  // CHECK-NEXT: llvm.fcmp "ule" %arg0, %arg1 : f32
+  // CHECK-NEXT: llvm.fcmp "une" %arg0, %arg1 : f32
+  // CHECK-NEXT: llvm.fcmp "uno" %arg0, %arg1 : f32
   // CHECK-NEXT: llvm.return
   %1 = cmpf "oeq", %arg0, %arg1 : f32
   %2 = cmpf "ogt", %arg0, %arg1 : f32
@@ -831,40 +831,40 @@ func @vec_bin(%arg0: vector<2x2x2xf32>) -> vector<2x2x2xf32> {
   %0 = addf %arg0, %arg0 : vector<2x2x2xf32>
   return %0 : vector<2x2x2xf32>
 
-//  CHECK-NEXT: llvm.mlir.undef : !llvm.array<2 x array<2 x vec<2 x float>>>
+//  CHECK-NEXT: llvm.mlir.undef : !llvm.array<2 x array<2 x vec<2 x f32>>>
 
 // This block appears 2x2 times
-//  CHECK-NEXT: llvm.extractvalue %{{.*}}[0, 0] : !llvm.array<2 x array<2 x vec<2 x float>>>
-//  CHECK-NEXT: llvm.extractvalue %{{.*}}[0, 0] : !llvm.array<2 x array<2 x vec<2 x float>>>
-//  CHECK-NEXT: llvm.fadd %{{.*}} : !llvm.vec<2 x float>
-//  CHECK-NEXT: llvm.insertvalue %{{.*}}[0, 0] : !llvm.array<2 x array<2 x vec<2 x float>>>
+//  CHECK-NEXT: llvm.extractvalue %{{.*}}[0, 0] : !llvm.array<2 x array<2 x vec<2 x f32>>>
+//  CHECK-NEXT: llvm.extractvalue %{{.*}}[0, 0] : !llvm.array<2 x array<2 x vec<2 x f32>>>
+//  CHECK-NEXT: llvm.fadd %{{.*}} : !llvm.vec<2 x f32>
+//  CHECK-NEXT: llvm.insertvalue %{{.*}}[0, 0] : !llvm.array<2 x array<2 x vec<2 x f32>>>
 
 // We check the proper indexing of extract/insert in the remaining 3 positions.
-//       CHECK: llvm.extractvalue %{{.*}}[0, 1] : !llvm.array<2 x array<2 x vec<2 x float>>>
-//       CHECK: llvm.insertvalue %{{.*}}[0, 1] : !llvm.array<2 x array<2 x vec<2 x float>>>
-//       CHECK: llvm.extractvalue %{{.*}}[1, 0] : !llvm.array<2 x array<2 x vec<2 x float>>>
-//       CHECK: llvm.insertvalue %{{.*}}[1, 0] : !llvm.array<2 x array<2 x vec<2 x float>>>
-//       CHECK: llvm.extractvalue %{{.*}}[1, 1] : !llvm.array<2 x array<2 x vec<2 x float>>>
-//       CHECK: llvm.insertvalue %{{.*}}[1, 1] : !llvm.array<2 x array<2 x vec<2 x float>>>
+//       CHECK: llvm.extractvalue %{{.*}}[0, 1] : !llvm.array<2 x array<2 x vec<2 x f32>>>
+//       CHECK: llvm.insertvalue %{{.*}}[0, 1] : !llvm.array<2 x array<2 x vec<2 x f32>>>
+//       CHECK: llvm.extractvalue %{{.*}}[1, 0] : !llvm.array<2 x array<2 x vec<2 x f32>>>
+//       CHECK: llvm.insertvalue %{{.*}}[1, 0] : !llvm.array<2 x array<2 x vec<2 x f32>>>
+//       CHECK: llvm.extractvalue %{{.*}}[1, 1] : !llvm.array<2 x array<2 x vec<2 x f32>>>
+//       CHECK: llvm.insertvalue %{{.*}}[1, 1] : !llvm.array<2 x array<2 x vec<2 x f32>>>
 
 // And we're done
 //   CHECK-NEXT: return
 }
 
 // CHECK-LABEL: @splat
-// CHECK-SAME: %[[A:arg[0-9]+]]: !llvm.vec<4 x float>
-// CHECK-SAME: %[[ELT:arg[0-9]+]]: !llvm.float
+// CHECK-SAME: %[[A:arg[0-9]+]]: !llvm.vec<4 x f32>
+// CHECK-SAME: %[[ELT:arg[0-9]+]]: f32
 func @splat(%a: vector<4xf32>, %b: f32) -> vector<4xf32> {
   %vb = splat %b : vector<4xf32>
   %r = mulf %a, %vb : vector<4xf32>
   return %r : vector<4xf32>
 }
-// CHECK-NEXT: %[[UNDEF:[0-9]+]] = llvm.mlir.undef : !llvm.vec<4 x float>
+// CHECK-NEXT: %[[UNDEF:[0-9]+]] = llvm.mlir.undef : !llvm.vec<4 x f32>
 // CHECK-NEXT: %[[ZERO:[0-9]+]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK-NEXT: %[[V:[0-9]+]] = llvm.insertelement %[[ELT]], %[[UNDEF]][%[[ZERO]] : i32] : !llvm.vec<4 x float>
+// CHECK-NEXT: %[[V:[0-9]+]] = llvm.insertelement %[[ELT]], %[[UNDEF]][%[[ZERO]] : i32] : !llvm.vec<4 x f32>
 // CHECK-NEXT: %[[SPLAT:[0-9]+]] = llvm.shufflevector %[[V]], %[[UNDEF]] [0 : i32, 0 : i32, 0 : i32, 0 : i32]
-// CHECK-NEXT: %[[SCALE:[0-9]+]] = llvm.fmul %[[A]], %[[SPLAT]] : !llvm.vec<4 x float>
-// CHECK-NEXT: llvm.return %[[SCALE]] : !llvm.vec<4 x float>
+// CHECK-NEXT: %[[SCALE:[0-9]+]] = llvm.fmul %[[A]], %[[SPLAT]] : !llvm.vec<4 x f32>
+// CHECK-NEXT: llvm.return %[[SCALE]] : !llvm.vec<4 x f32>
 
 // CHECK-LABEL: func @view(
 // CHECK: %[[ARG0:.*]]: i64, %[[ARG1:.*]]: i64, %[[ARG2:.*]]: i64
@@ -874,54 +874,54 @@ func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
   %0 = alloc() : memref<2048xi8>
 
   // Test two dynamic sizes.
-  // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[BASE_PTR:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
   // CHECK: %[[SHIFTED_BASE_PTR:.*]] = llvm.getelementptr %[[BASE_PTR]][%[[ARG2]]] : (!llvm.ptr<i8>, i64) -> !llvm.ptr<i8>
-  // CHECK: %[[CAST_SHIFTED_BASE_PTR:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR]] : !llvm.ptr<i8> to !llvm.ptr<float>
-  // CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR]], %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[CAST_SHIFTED_BASE_PTR:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR]] : !llvm.ptr<i8> to !llvm.ptr<f32>
+  // CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR]], %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64
-  // CHECK: llvm.insertvalue %[[C0]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: llvm.insertvalue %[[ARG1]], %{{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %[[C0]], %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %[[ARG1]], %{{.*}}[3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.mlir.constant(1 : index) : i64
-  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: llvm.insertvalue %[[ARG0]], %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %[[ARG0]], %{{.*}}[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.mul %{{.*}}, %[[ARG1]]
-  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   %1 = view %0[%arg2][%arg0, %arg1] : memref<2048xi8> to memref<?x?xf32>
 
   // Test one dynamic size.
-  // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[BASE_PTR_2:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
   // CHECK: %[[SHIFTED_BASE_PTR_2:.*]] = llvm.getelementptr %[[BASE_PTR_2]][%[[ARG2]]] : (!llvm.ptr<i8>, i64) -> !llvm.ptr<i8>
-  // CHECK: %[[CAST_SHIFTED_BASE_PTR_2:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR_2]] : !llvm.ptr<i8> to !llvm.ptr<float>
-  // CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR_2]], %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[CAST_SHIFTED_BASE_PTR_2:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR_2]] : !llvm.ptr<i8> to !llvm.ptr<f32>
+  // CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR_2]], %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[C0_2:.*]] = llvm.mlir.constant(0 : index) : i64
-  // CHECK: llvm.insertvalue %[[C0_2]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: llvm.insertvalue %[[ARG1]], %{{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %[[C0_2]], %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %[[ARG1]], %{{.*}}[3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.mlir.constant(1 : index) : i64
-  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.mlir.constant(4 : index) : i64
-  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.mul %{{.*}}, %[[ARG1]]
-  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   %3 = view %0[%arg2][%arg1] : memref<2048xi8> to memref<4x?xf32>
 
   // Test static sizes.
-  // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[BASE_PTR_3:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<i8>, ptr<i8>, i64, array<1 x i64>, array<1 x i64>)>
   // CHECK: %[[SHIFTED_BASE_PTR_3:.*]] = llvm.getelementptr %[[BASE_PTR_3]][%[[ARG2]]] : (!llvm.ptr<i8>, i64) -> !llvm.ptr<i8>
-  // CHECK: %[[CAST_SHIFTED_BASE_PTR_3:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR_3]] : !llvm.ptr<i8> to !llvm.ptr<float>
-  // CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR_3]], %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[CAST_SHIFTED_BASE_PTR_3:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR_3]] : !llvm.ptr<i8> to !llvm.ptr<f32>
+  // CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR_3]], %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[C0_3:.*]] = llvm.mlir.constant(0 : index) : i64
-  // CHECK: llvm.insertvalue %[[C0_3]], %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %[[C0_3]], %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.mlir.constant(4 : index) : i64
-  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.mlir.constant(1 : index) : i64
-  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.mlir.constant(64 : index) : i64
-  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.mlir.constant(4 : index) : i64
-  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   %5 = view %0[%arg2][] : memref<2048xi8> to memref<64x4xf32>
 
   // Test view memory space.
@@ -929,34 +929,34 @@ func @view(%arg0 : index, %arg1 : index, %arg2 : index) {
   // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<i8, 4>, ptr<i8, 4>, i64, array<1 x i64>, array<1 x i64>)>
   %6 = alloc() : memref<2048xi8, 4>
 
-  // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<float, 4>, ptr<float, 4>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<f32, 4>, ptr<f32, 4>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[BASE_PTR_4:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<i8, 4>, ptr<i8, 4>, i64, array<1 x i64>, array<1 x i64>)>
   // CHECK: %[[SHIFTED_BASE_PTR_4:.*]] = llvm.getelementptr %[[BASE_PTR_4]][%[[ARG2]]] : (!llvm.ptr<i8, 4>, i64) -> !llvm.ptr<i8, 4>
-  // CHECK: %[[CAST_SHIFTED_BASE_PTR_4:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR_4]] : !llvm.ptr<i8, 4> to !llvm.ptr<float, 4>
-  // CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR_4]], %{{.*}}[1] : !llvm.struct<(ptr<float, 4>, ptr<float, 4>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[CAST_SHIFTED_BASE_PTR_4:.*]] = llvm.bitcast %[[SHIFTED_BASE_PTR_4]] : !llvm.ptr<i8, 4> to !llvm.ptr<f32, 4>
+  // CHECK: llvm.insertvalue %[[CAST_SHIFTED_BASE_PTR_4]], %{{.*}}[1] : !llvm.struct<(ptr<f32, 4>, ptr<f32, 4>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[C0_4:.*]] = llvm.mlir.constant(0 : index) : i64
-  // CHECK: llvm.insertvalue %[[C0_4]], %{{.*}}[2] : !llvm.struct<(ptr<float, 4>, ptr<float, 4>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %[[C0_4]], %{{.*}}[2] : !llvm.struct<(ptr<f32, 4>, ptr<f32, 4>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.mlir.constant(4 : index) : i64
-  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr<float, 4>, ptr<float, 4>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr<f32, 4>, ptr<f32, 4>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.mlir.constant(1 : index) : i64
-  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<float, 4>, ptr<float, 4>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<f32, 4>, ptr<f32, 4>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.mlir.constant(64 : index) : i64
-  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<float, 4>, ptr<float, 4>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<f32, 4>, ptr<f32, 4>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: llvm.mlir.constant(4 : index) : i64
-  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<float, 4>, ptr<float, 4>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<f32, 4>, ptr<f32, 4>, i64, array<2 x i64>, array<2 x i64>)>
   %7 = view %6[%arg2][] : memref<2048xi8, 4> to memref<64x4xf32, 4>
 
   return
 }
 
 // CHECK-LABEL: func @subview(
-// CHECK-COUNT-2: !llvm.ptr<float>,
+// CHECK-COUNT-2: !llvm.ptr<f32>,
 // CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: i64,
 // CHECK:         %[[ARG0:[a-zA-Z0-9]*]]: i64,
 // CHECK:         %[[ARG1:[a-zA-Z0-9]*]]: i64,
 // CHECK:         %[[ARG2:.*]]: i64)
 // CHECK32-LABEL: func @subview(
-// CHECK32-COUNT-2: !llvm.ptr<float>,
+// CHECK32-COUNT-2: !llvm.ptr<f32>,
 // CHECK32-COUNT-5: {{%[a-zA-Z0-9]*}}: i32,
 // CHECK32:         %[[ARG0:[a-zA-Z0-9]*]]: i32,
 // CHECK32:         %[[ARG1:[a-zA-Z0-9]*]]: i32,
@@ -966,42 +966,42 @@ func @subview(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index,
   // CHECK: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
   // CHECK32: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
 
-  // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<float>
-  // CHECK: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<float>
-  // CHECK: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
+  // CHECK: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
+  // CHECK: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i64
   // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i64
   // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i64
   // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i64
-  // CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i64
-  // CHECK: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i64
-  // CHECK: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<float>
-  // CHECK32: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<float>
-  // CHECK32: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
+  // CHECK32: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
+  // CHECK32: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i32
   // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i32
   // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i32
   // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i32
-  // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i32
-  // CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i32
 
   %1 = subview %0[%arg0, %arg1][%arg0, %arg1][%arg0, %arg1] :
@@ -1011,13 +1011,13 @@ func @subview(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg0 : index,
 }
 
 // CHECK-LABEL: func @subview_non_zero_addrspace(
-// CHECK-COUNT-2: !llvm.ptr<float, 3>,
+// CHECK-COUNT-2: !llvm.ptr<f32, 3>,
 // CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: i64,
 // CHECK:         %[[ARG0:[a-zA-Z0-9]*]]: i64,
 // CHECK:         %[[ARG1:[a-zA-Z0-9]*]]: i64,
 // CHECK:         %[[ARG2:.*]]: i64)
 // CHECK32-LABEL: func @subview_non_zero_addrspace(
-// CHECK32-COUNT-2: !llvm.ptr<float, 3>,
+// CHECK32-COUNT-2: !llvm.ptr<f32, 3>,
 // CHECK32-COUNT-5: {{%[a-zA-Z0-9]*}}: i32,
 // CHECK32:         %[[ARG0:[a-zA-Z0-9]*]]: i32,
 // CHECK32:         %[[ARG1:[a-zA-Z0-9]*]]: i32,
@@ -1027,42 +1027,42 @@ func @subview_non_zero_addrspace(%0 : memref<64x4xf32, offset: 0, strides: [4, 1
   // CHECK: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
   // CHECK32: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
 
-  // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float, 3> to !llvm.ptr<float, 3>
-  // CHECK: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float, 3> to !llvm.ptr<float, 3>
-  // CHECK: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32, 3> to !llvm.ptr<f32, 3>
+  // CHECK: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32, 3> to !llvm.ptr<f32, 3>
+  // CHECK: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i64
   // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i64
   // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i64
   // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i64
-  // CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i64
-  // CHECK: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i64
-  // CHECK: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float, 3> to !llvm.ptr<float, 3>
-  // CHECK32: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float, 3> to !llvm.ptr<float, 3>
-  // CHECK32: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32, 3> to !llvm.ptr<f32, 3>
+  // CHECK32: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32, 3> to !llvm.ptr<f32, 3>
+  // CHECK32: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i32
   // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i32
   // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i32
   // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i32
-  // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[ARG1]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE1]] : i32
-  // CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i32
 
   %1 = subview %0[%arg0, %arg1][%arg0, %arg1][%arg0, %arg1] :
@@ -1072,8 +1072,8 @@ func @subview_non_zero_addrspace(%0 : memref<64x4xf32, offset: 0, strides: [4, 1
 }
 
 // CHECK-LABEL: func @subview_const_size(
-// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<float>,
-// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<float>,
+// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<f32>,
+// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<f32>,
 // CHECK-SAME:         %[[ARG2:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[ARG3:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[ARG4:[a-zA-Z0-9]*]]: i64
@@ -1083,8 +1083,8 @@ func @subview_non_zero_addrspace(%0 : memref<64x4xf32, offset: 0, strides: [4, 1
 // CHECK-SAME:         %[[ARG8:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[ARG9:[a-zA-Z0-9]*]]: i64
 // CHECK32-LABEL: func @subview_const_size(
-// CHECK32-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<float>,
-// CHECK32-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<float>,
+// CHECK32-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<f32>,
+// CHECK32-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<f32>,
 // CHECK32-SAME:         %[[ARG2:[a-zA-Z0-9]*]]: i32
 // CHECK32-SAME:         %[[ARG3:[a-zA-Z0-9]*]]: i32
 // CHECK32-SAME:         %[[ARG4:[a-zA-Z0-9]*]]: i32
@@ -1098,48 +1098,48 @@ func @subview_const_size(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg
   // CHECK: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
   // CHECK32: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
 
-  // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<float>
-  // CHECK: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<float>
-  // CHECK: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
+  // CHECK: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
+  // CHECK: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : i64
   // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i64
   // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : i64
   // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i64
-  // CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[CST2:.*]] = llvm.mlir.constant(2 : i64)
-  // CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[CST2]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[CST2]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : i64
-  // CHECK: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[CST4:.*]] = llvm.mlir.constant(4 : i64)
-  // CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[CST4]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[CST4]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : i64
-  // CHECK: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<float>
-  // CHECK32: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<float>
-  // CHECK32: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
+  // CHECK32: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
+  // CHECK32: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : i32
   // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i32
   // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : i32
   // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i32
-  // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[CST2:.*]] = llvm.mlir.constant(2 : i64)
-  // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[CST2]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[CST2]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[DESCSTRIDE1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : i32
-  // CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[DESCSTRIDE1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[CST4:.*]] = llvm.mlir.constant(4 : i64)
-  // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[CST4]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[CST4]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : i32
-  // CHECK32: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   %1 = subview %0[%arg0, %arg1][4, 2][%arg0, %arg1] :
     memref<64x4xf32, offset: 0, strides: [4, 1]>
     to memref<4x2xf32, offset: ?, strides: [?, ?]>
@@ -1147,8 +1147,8 @@ func @subview_const_size(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg
 }
 
 // CHECK-LABEL: func @subview_const_stride(
-// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<float>,
-// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<float>,
+// CHECK-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<f32>,
+// CHECK-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<f32>,
 // CHECK-SAME:         %[[ARG2:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[ARG3:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[ARG4:[a-zA-Z0-9]*]]: i64
@@ -1158,8 +1158,8 @@ func @subview_const_size(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %arg
 // CHECK-SAME:         %[[ARG8:[a-zA-Z0-9]*]]: i64
 // CHECK-SAME:         %[[ARG9:[a-zA-Z0-9]*]]: i64
 // CHECK32-LABEL: func @subview_const_stride(
-// CHECK32-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<float>,
-// CHECK32-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<float>,
+// CHECK32-SAME:         %[[ARG0:[a-zA-Z0-9]*]]: !llvm.ptr<f32>,
+// CHECK32-SAME:         %[[ARG1:[a-zA-Z0-9]*]]: !llvm.ptr<f32>,
 // CHECK32-SAME:         %[[ARG2:[a-zA-Z0-9]*]]: i32
 // CHECK32-SAME:         %[[ARG3:[a-zA-Z0-9]*]]: i32
 // CHECK32-SAME:         %[[ARG4:[a-zA-Z0-9]*]]: i32
@@ -1173,44 +1173,44 @@ func @subview_const_stride(%0 : memref<64x4xf32, offset: 0, strides: [4, 1]>, %a
   // CHECK: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
   // CHECK32: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
 
-  // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<float>
-  // CHECK: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<float>
-  // CHECK: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
+  // CHECK: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
+  // CHECK: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : i64
   // CHECK: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i64
   // CHECK: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : i64
   // CHECK: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i64
-  // CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[ARG8]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC3:.*]] = llvm.insertvalue %[[ARG8]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[CST2:.*]] = llvm.mlir.constant(2 : i64)
-  // CHECK: %[[DESC4:.*]] = llvm.insertvalue %[[CST2]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[ARG7]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC4:.*]] = llvm.insertvalue %[[CST2]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[ARG7]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[CST4:.*]] = llvm.mlir.constant(4 : i64)
-  // CHECK: llvm.insertvalue %[[CST4]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<float>
-  // CHECK32: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<float>
-  // CHECK32: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK: llvm.insertvalue %[[CST4]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
+  // CHECK32: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
+  // CHECK32: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[OFFINC:.*]] = llvm.mul %[[ARG7]], %[[STRIDE0]] : i32
   // CHECK32: %[[OFF1:.*]] = llvm.add %[[OFF]], %[[OFFINC]] : i32
   // CHECK32: %[[OFFINC1:.*]] = llvm.mul %[[ARG8]], %[[STRIDE1]] : i32
   // CHECK32: %[[OFF2:.*]] = llvm.add %[[OFF1]], %[[OFFINC1]] : i32
-  // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[ARG8]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFF2]], %[[DESC1]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[ARG8]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[CST2:.*]] = llvm.mlir.constant(2 : i64)
-  // CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[CST2]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[ARG7]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[CST2]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[ARG7]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[CST4:.*]] = llvm.mlir.constant(4 : i64)
-  // CHECK32: llvm.insertvalue %[[CST4]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: llvm.insertvalue %[[CST4]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   %1 = subview %0[%arg0, %arg1][%arg0, %arg1][1, 2] :
     memref<64x4xf32, offset: 0, strides: [4, 1]>
     to memref<?x?xf32, offset: ?, strides: [4, 2]>
@@ -1224,23 +1224,23 @@ func @subview_const_stride_and_offset(%0 : memref<64x4xf32, offset: 0, strides:
   // CHECK: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
   // CHECK32: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
 
-  // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<float>
-  // CHECK32: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<float>
-  // CHECK32: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
+  // CHECK32: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
+  // CHECK32: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[CST8:.*]] = llvm.mlir.constant(8 : index)
-  // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[CST8]], %[[DESC1]][2] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[CST8]], %[[DESC1]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[CST3:.*]] = llvm.mlir.constant(3 : i64)
-  // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[CST3]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[CST3]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[CST1:.*]] = llvm.mlir.constant(1 : i64)
-  // CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[CST1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[CST1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[CST62:.*]] = llvm.mlir.constant(62 : i64)
-  // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[CST62]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[CST62]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[CST4:.*]] = llvm.mlir.constant(4 : i64)
-  // CHECK32: llvm.insertvalue %[[CST4]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: llvm.insertvalue %[[CST4]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   %1 = subview %0[0, 8][62, 3][1, 1] :
     memref<64x4xf32, offset: 0, strides: [4, 1]>
     to memref<62x3xf32, offset: 8, strides: [4, 1]>
@@ -1248,13 +1248,13 @@ func @subview_const_stride_and_offset(%0 : memref<64x4xf32, offset: 0, strides:
 }
 
 // CHECK-LABEL: func @subview_mixed_static_dynamic(
-// CHECK-COUNT-2: !llvm.ptr<float>,
+// CHECK-COUNT-2: !llvm.ptr<f32>,
 // CHECK-COUNT-5: {{%[a-zA-Z0-9]*}}: i64,
 // CHECK:         %[[ARG0:[a-zA-Z0-9]*]]: i64,
 // CHECK:         %[[ARG1:[a-zA-Z0-9]*]]: i64,
 // CHECK:         %[[ARG2:.*]]: i64)
 // CHECK32-LABEL: func @subview_mixed_static_dynamic(
-// CHECK32-COUNT-2: !llvm.ptr<float>,
+// CHECK32-COUNT-2: !llvm.ptr<f32>,
 // CHECK32-COUNT-5: {{%[a-zA-Z0-9]*}}: i32,
 // CHECK32:         %[[ARG0:[a-zA-Z0-9]*]]: i32,
 // CHECK32:         %[[ARG1:[a-zA-Z0-9]*]]: i32,
@@ -1264,28 +1264,28 @@ func @subview_mixed_static_dynamic(%0 : memref<64x4xf32, offset: 0, strides: [4,
   // CHECK: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
   // CHECK32: %[[MEMREF:.*]] = llvm.insertvalue %{{.*}}, %{{.*}}[4, 1]
 
-  // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<float>
-  // CHECK32: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<float> to !llvm.ptr<float>
-  // CHECK32: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
-  // CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[BITCAST0:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
+  // CHECK32: %[[DESC0:.*]] = llvm.insertvalue %[[BITCAST0]], %[[DESC]][0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[BITCAST1:.*]] = llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<f32>
+  // CHECK32: %[[DESC1:.*]] = llvm.insertvalue %[[BITCAST1]], %[[DESC0]][1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[STRIDE0:.*]] = llvm.extractvalue %[[MEMREF]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[STRIDE1:.*]] = llvm.extractvalue %[[MEMREF]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[OFF:.*]] = llvm.extractvalue %[[MEMREF]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[OFFM1:.*]] = llvm.mul %[[ARG1]], %[[STRIDE0]] : i32
   // CHECK32: %[[OFFA1:.*]] = llvm.add %[[OFF]], %[[OFFM1]] : i32
   // CHECK32: %[[CST8:.*]] = llvm.mlir.constant(8 : i64) : i32
   // CHECK32: %[[OFFM2:.*]] = llvm.mul %[[CST8]], %[[STRIDE1]] : i32
   // CHECK32: %[[OFFA2:.*]] = llvm.add %[[OFFA1]], %[[OFFM2]] : i32
-  // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFFA2]], %[[DESC1]][2] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC2:.*]] = llvm.insertvalue %[[OFFA2]], %[[DESC1]][2] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
 
-  // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[ARG2]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC3:.*]] = llvm.insertvalue %[[ARG2]], %[[DESC2]][3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[CST1:.*]] = llvm.mlir.constant(1 : i64) : i32
-  // CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[CST1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC4:.*]] = llvm.insertvalue %[[CST1]], %[[DESC3]][4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[CST62:.*]] = llvm.mlir.constant(62 : i64) : i32
-  // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[CST62]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: %[[DESC5:.*]] = llvm.insertvalue %[[CST62]], %[[DESC4]][3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   // CHECK32: %[[DESCSTRIDE0:.*]] = llvm.mul %[[ARG0]], %[[STRIDE0]] : i32
-  // CHECK32: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i32, array<2 x i32>, array<2 x i32>)>
+  // CHECK32: llvm.insertvalue %[[DESCSTRIDE0]], %[[DESC5]][4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i32, array<2 x i32>, array<2 x i32>)>
   %1 = subview %0[%arg1, 8][62, %arg2][%arg0, 1] :
     memref<64x4xf32, offset: 0, strides: [4, 1]>
     to memref<62x?xf32, offset: ?, strides: [?, 1]>
@@ -1344,10 +1344,10 @@ func @generic_atomic_rmw(%I : memref<10xi32>, %i : index) -> i32 {
 
 // CHECK-LABEL: func @assume_alignment
 func @assume_alignment(%0 : memref<4x4xf16>) {
-  // CHECK: %[[PTR:.*]] = llvm.extractvalue %[[MEMREF:.*]][1] : !llvm.struct<(ptr<half>, ptr<half>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[PTR:.*]] = llvm.extractvalue %[[MEMREF:.*]][1] : !llvm.struct<(ptr<f16>, ptr<f16>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK-NEXT: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
   // CHECK-NEXT: %[[MASK:.*]] = llvm.mlir.constant(15 : index) : i64
-  // CHECK-NEXT: %[[INT:.*]] = llvm.ptrtoint %[[PTR]] : !llvm.ptr<half> to i64
+  // CHECK-NEXT: %[[INT:.*]] = llvm.ptrtoint %[[PTR]] : !llvm.ptr<f16> to i64
   // CHECK-NEXT: %[[MASKED_PTR:.*]] = llvm.and %[[INT]], %[[MASK:.*]] : i64
   // CHECK-NEXT: %[[CONDITION:.*]] = llvm.icmp "eq" %[[MASKED_PTR]], %[[ZERO]] : i64
   // CHECK-NEXT: "llvm.intr.assume"(%[[CONDITION]]) : (i1) -> ()
@@ -1359,49 +1359,22 @@ func @assume_alignment(%0 : memref<4x4xf16>) {
 
 // CHECK-LABEL: func @mlir_cast_to_llvm
 // CHECK-SAME: %[[ARG:.*]]:
-func @mlir_cast_to_llvm(%0 : vector<2xf16>) -> !llvm.vec<2 x half> {
-  %1 = llvm.mlir.cast %0 : vector<2xf16> to !llvm.vec<2 x half>
+func @mlir_cast_to_llvm(%0 : vector<2xf16>) -> !llvm.vec<2 x f16> {
+  %1 = llvm.mlir.cast %0 : vector<2xf16> to !llvm.vec<2 x f16>
   // CHECK-NEXT: llvm.return %[[ARG]]
-  return %1 : !llvm.vec<2 x half>
+  return %1 : !llvm.vec<2 x f16>
 }
 
 // CHECK-LABEL: func @mlir_cast_from_llvm
 // CHECK-SAME: %[[ARG:.*]]:
-func @mlir_cast_from_llvm(%0 : !llvm.vec<2 x half>) -> vector<2xf16> {
-  %1 = llvm.mlir.cast %0 : !llvm.vec<2 x half> to vector<2xf16>
+func @mlir_cast_from_llvm(%0 : !llvm.vec<2 x f16>) -> vector<2xf16> {
+  %1 = llvm.mlir.cast %0 : !llvm.vec<2 x f16> to vector<2xf16>
   // CHECK-NEXT: llvm.return %[[ARG]]
   return %1 : vector<2xf16>
 }
 
 // -----
 
-// CHECK-LABEL: func @mlir_cast_to_llvm
-// CHECK-SAME: %[[ARG:.*]]:
-func @mlir_cast_to_llvm(%0 : f16) -> !llvm.half {
-  %1 = llvm.mlir.cast %0 : f16 to !llvm.half
-  // CHECK-NEXT: llvm.return %[[ARG]]
-  return %1 : !llvm.half
-}
-
-// CHECK-LABEL: func @mlir_cast_from_llvm
-// CHECK-SAME: %[[ARG:.*]]:
-func @mlir_cast_from_llvm(%0 : !llvm.half) -> f16 {
-  %1 = llvm.mlir.cast %0 : !llvm.half to f16
-  // CHECK-NEXT: llvm.return %[[ARG]]
-  return %1 : f16
-}
-
-// -----
-
-// CHECK-LABEL: func @bfloat
-// CHECK-SAME: !llvm.bfloat) -> !llvm.bfloat
-func @bfloat(%arg0: bf16) -> bf16 {
-  return %arg0 : bf16
-}
-// CHECK-NEXT: return %{{.*}} : !llvm.bfloat
-
-// -----
-
 // CHECK-LABEL: func @memref_index
 // CHECK-SAME: %arg0: !llvm.ptr<i64>, %arg1: !llvm.ptr<i64>,
 // CHECK-SAME: %arg2: i64, %arg3: i64, %arg4: i64)

diff  --git a/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir b/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir
index 0d346d4cfa59..56126f603c27 100644
--- a/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir
+++ b/mlir/test/Conversion/StandardToLLVM/standard-to-llvm.mlir
@@ -1,11 +1,11 @@
 // RUN: mlir-opt -allow-unregistered-dialect %s -convert-std-to-llvm -split-input-file -verify-diagnostics | FileCheck %s
 
 // CHECK-LABEL: func @address_space(
-// CHECK-SAME:    !llvm.ptr<float, 7>
+// CHECK-SAME:    !llvm.ptr<f32, 7>
 func @address_space(%arg0 : memref<32xf32, affine_map<(d0) -> (d0)>, 7>) {
   %0 = alloc() : memref<32xf32, affine_map<(d0) -> (d0)>, 5>
   %1 = constant 7 : index
-  // CHECK: llvm.load %{{.*}} : !llvm.ptr<float, 5>
+  // CHECK: llvm.load %{{.*}} : !llvm.ptr<f32, 5>
   %2 = load %0[%1] : memref<32xf32, affine_map<(d0) -> (d0)>, 5>
   std.return
 }
@@ -13,11 +13,11 @@ func @address_space(%arg0 : memref<32xf32, affine_map<(d0) -> (d0)>, 7>) {
 // -----
 
 // CHECK-LABEL: func @rsqrt(
-// CHECK-SAME: !llvm.float
+// CHECK-SAME: f32
 func @rsqrt(%arg0 : f32) {
-  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float
-  // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%arg0) : (!llvm.float) -> !llvm.float
-  // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] : !llvm.float
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : f32
+  // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%arg0) : (f32) -> f32
+  // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] : f32
   %0 = rsqrt %arg0 : f32
   std.return
 }
@@ -25,9 +25,9 @@ func @rsqrt(%arg0 : f32) {
 // -----
 
 // CHECK-LABEL: func @sine(
-// CHECK-SAME: !llvm.float
+// CHECK-SAME: f32
 func @sine(%arg0 : f32) {
-  // CHECK: "llvm.intr.sin"(%arg0) : (!llvm.float) -> !llvm.float
+  // CHECK: "llvm.intr.sin"(%arg0) : (f32) -> f32
   %0 = sin %arg0 : f32
   std.return
 }
@@ -35,9 +35,9 @@ func @sine(%arg0 : f32) {
 // -----
 
 // CHECK-LABEL: func @ceilf(
-// CHECK-SAME: !llvm.float
+// CHECK-SAME: f32
 func @ceilf(%arg0 : f32) {
-  // CHECK: "llvm.intr.ceil"(%arg0) : (!llvm.float) -> !llvm.float
+  // CHECK: "llvm.intr.ceil"(%arg0) : (f32) -> f32
   %0 = ceilf %arg0 : f32
   std.return
 }
@@ -45,9 +45,9 @@ func @ceilf(%arg0 : f32) {
 // -----
 
 // CHECK-LABEL: func @floorf(
-// CHECK-SAME: !llvm.float
+// CHECK-SAME: f32
 func @floorf(%arg0 : f32) {
-  // CHECK: "llvm.intr.floor"(%arg0) : (!llvm.float) -> !llvm.float
+  // CHECK: "llvm.intr.floor"(%arg0) : (f32) -> f32
   %0 = floorf %arg0 : f32
   std.return
 }
@@ -56,11 +56,11 @@ func @floorf(%arg0 : f32) {
 
 
 // CHECK-LABEL: func @rsqrt_double(
-// CHECK-SAME: !llvm.double
+// CHECK-SAME: f64
 func @rsqrt_double(%arg0 : f64) {
-  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1.000000e+00 : f64) : !llvm.double
-  // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%arg0) : (!llvm.double) -> !llvm.double
-  // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] : !llvm.double
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1.000000e+00 : f64) : f64
+  // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%arg0) : (f64) -> f64
+  // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] : f64
   %0 = rsqrt %arg0 : f64
   std.return
 }
@@ -68,11 +68,11 @@ func @rsqrt_double(%arg0 : f64) {
 // -----
 
 // CHECK-LABEL: func @rsqrt_vector(
-// CHECK-SAME: !llvm.vec<4 x float>
+// CHECK-SAME: !llvm.vec<4 x f32>
 func @rsqrt_vector(%arg0 : vector<4xf32>) {
-  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(dense<1.000000e+00> : vector<4xf32>) : !llvm.vec<4 x float>
-  // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%arg0) : (!llvm.vec<4 x float>) -> !llvm.vec<4 x float>
-  // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] : !llvm.vec<4 x float>
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(dense<1.000000e+00> : vector<4xf32>) : !llvm.vec<4 x f32>
+  // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%arg0) : (!llvm.vec<4 x f32>) -> !llvm.vec<4 x f32>
+  // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] : !llvm.vec<4 x f32>
   %0 = rsqrt %arg0 : vector<4xf32>
   std.return
 }
@@ -80,13 +80,13 @@ func @rsqrt_vector(%arg0 : vector<4xf32>) {
 // -----
 
 // CHECK-LABEL: func @rsqrt_multidim_vector(
-// CHECK-SAME: !llvm.array<4 x vec<3 x float>>
+// CHECK-SAME: !llvm.array<4 x vec<3 x f32>>
 func @rsqrt_multidim_vector(%arg0 : vector<4x3xf32>) {
-  // CHECK: %[[EXTRACT:.*]] = llvm.extractvalue %arg0[0] : !llvm.array<4 x vec<3 x float>>
-  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(dense<1.000000e+00> : vector<3xf32>) : !llvm.vec<3 x float>
-  // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%[[EXTRACT]]) : (!llvm.vec<3 x float>) -> !llvm.vec<3 x float>
-  // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] : !llvm.vec<3 x float>
-  // CHECK: %[[INSERT:.*]] = llvm.insertvalue %[[DIV]], %0[0] : !llvm.array<4 x vec<3 x float>>
+  // CHECK: %[[EXTRACT:.*]] = llvm.extractvalue %arg0[0] : !llvm.array<4 x vec<3 x f32>>
+  // CHECK: %[[ONE:.*]] = llvm.mlir.constant(dense<1.000000e+00> : vector<3xf32>) : !llvm.vec<3 x f32>
+  // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%[[EXTRACT]]) : (!llvm.vec<3 x f32>) -> !llvm.vec<3 x f32>
+  // CHECK: %[[DIV:.*]] = llvm.fdiv %[[ONE]], %[[SQRT]] : !llvm.vec<3 x f32>
+  // CHECK: %[[INSERT:.*]] = llvm.insertvalue %[[DIV]], %0[0] : !llvm.array<4 x vec<3 x f32>>
   %0 = rsqrt %arg0 : vector<4x3xf32>
   std.return
 }
@@ -111,16 +111,16 @@ func @assert_test_function(%arg : i1) {
 // -----
 
 // CHECK-LABEL: func @transpose
-//       CHECK:   llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:   llvm.insertvalue {{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:    llvm.insertvalue {{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:    llvm.insertvalue {{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:   llvm.extractvalue {{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:    llvm.insertvalue {{.*}}[3, 2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:   llvm.extractvalue {{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:    llvm.insertvalue {{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:   llvm.extractvalue {{.*}}[3, 2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:    llvm.insertvalue {{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:   llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:   llvm.insertvalue {{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:    llvm.insertvalue {{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:    llvm.insertvalue {{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:   llvm.extractvalue {{.*}}[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:    llvm.insertvalue {{.*}}[3, 2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:   llvm.extractvalue {{.*}}[3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:    llvm.insertvalue {{.*}}[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:   llvm.extractvalue {{.*}}[3, 2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:    llvm.insertvalue {{.*}}[3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
 func @transpose(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
   %0 = transpose %arg0 (i, j, k) -> (k, i, j) : memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]> to memref<?x?x?xf32, affine_map<(d0, d1, d2)[s0, s1, s2] -> (d2 * s1 + s0 + d0 * s2 + d1)>>
   return
@@ -128,13 +128,13 @@ func @transpose(%arg0: memref<?x?x?xf32, offset: ?, strides: [?, ?, 1]>) {
 
 // -----
 
-// CHECK: llvm.mlir.global external @gv0() : !llvm.array<2 x float>
+// CHECK: llvm.mlir.global external @gv0() : !llvm.array<2 x f32>
 global_memref @gv0 : memref<2xf32> = uninitialized
 
-// CHECK: llvm.mlir.global private @gv1() : !llvm.array<2 x float>
+// CHECK: llvm.mlir.global private @gv1() : !llvm.array<2 x f32>
 global_memref "private" @gv1 : memref<2xf32>
 
-// CHECK: llvm.mlir.global external @gv2(dense<{{\[\[}}0.000000e+00, 1.000000e+00, 2.000000e+00], [3.000000e+00, 4.000000e+00, 5.000000e+00]]> : tensor<2x3xf32>) : !llvm.array<2 x array<3 x float>>
+// CHECK: llvm.mlir.global external @gv2(dense<{{\[\[}}0.000000e+00, 1.000000e+00, 2.000000e+00], [3.000000e+00, 4.000000e+00, 5.000000e+00]]> : tensor<2x3xf32>) : !llvm.array<2 x array<3 x f32>>
 global_memref @gv2 : memref<2x3xf32> = dense<[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]>
 
 // Test 1D memref.
@@ -143,18 +143,18 @@ func @get_gv0_memref() {
   %0 = get_global_memref @gv0 : memref<2xf32>
   // CHECK: %[[DIM:.*]] = llvm.mlir.constant(2 : index) : i64
   // CHECK: %[[STRIDE:.*]] = llvm.mlir.constant(1 : index) : i64
-  // CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv0 : !llvm.ptr<array<2 x float>>
+  // CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv0 : !llvm.ptr<array<2 x f32>>
   // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
-  // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]], %[[ZERO]]] : (!llvm.ptr<array<2 x float>>, i64, i64) -> !llvm.ptr<float>
+  // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]], %[[ZERO]]] : (!llvm.ptr<array<2 x f32>>, i64, i64) -> !llvm.ptr<f32>
   // CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : i64
-  // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr<float>
-  // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-  // CHECK: llvm.insertvalue %[[DEADBEEFPTR]], {{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-  // CHECK: llvm.insertvalue %[[GEP]], {{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr<f32>
+  // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: llvm.insertvalue %[[DEADBEEFPTR]], {{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: llvm.insertvalue %[[GEP]], {{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
   // CHECK: %[[OFFSET:.*]] = llvm.mlir.constant(0 : index) : i64
-  // CHECK: llvm.insertvalue %[[OFFSET]], {{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-  // CHECK: llvm.insertvalue %[[DIM]], {{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-  // CHECK: llvm.insertvalue %[[STRIDE]], {{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: llvm.insertvalue %[[OFFSET]], {{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: llvm.insertvalue %[[DIM]], {{.*}}[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+  // CHECK: llvm.insertvalue %[[STRIDE]], {{.*}}[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
   return
 }
 
@@ -164,41 +164,41 @@ func @get_gv2_memref() {
   // CHECK: %[[DIM0:.*]] = llvm.mlir.constant(2 : index) : i64
   // CHECK: %[[DIM1:.*]] = llvm.mlir.constant(3 : index) : i64
   // CHECK: %[[STRIDE1:.*]] = llvm.mlir.constant(1 : index) : i64
-  // CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv2 : !llvm.ptr<array<2 x array<3 x float>>>
+  // CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv2 : !llvm.ptr<array<2 x array<3 x f32>>>
   // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
-  // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]], %[[ZERO]], %[[ZERO]]] : (!llvm.ptr<array<2 x array<3 x float>>>, i64, i64, i64) -> !llvm.ptr<float>
+  // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]], %[[ZERO]], %[[ZERO]]] : (!llvm.ptr<array<2 x array<3 x f32>>>, i64, i64, i64) -> !llvm.ptr<f32>
   // CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : i64
-  // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr<float>
-  // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: llvm.insertvalue %[[DEADBEEFPTR]], {{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: llvm.insertvalue %[[GEP]], {{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr<f32>
+  // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %[[DEADBEEFPTR]], {{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %[[GEP]], {{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: %[[OFFSET:.*]] = llvm.mlir.constant(0 : index) : i64
-  // CHECK: llvm.insertvalue %[[OFFSET]], {{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: llvm.insertvalue %[[DIM0]], {{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: llvm.insertvalue %[[DIM1]], {{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: llvm.insertvalue %[[DIM1]], {{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-  // CHECK: llvm.insertvalue %[[STRIDE1]], {{.*}}[4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %[[OFFSET]], {{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %[[DIM0]], {{.*}}[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %[[DIM1]], {{.*}}[3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %[[DIM1]], {{.*}}[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+  // CHECK: llvm.insertvalue %[[STRIDE1]], {{.*}}[4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 
   %0 = get_global_memref @gv2 : memref<2x3xf32>
   return
 }
 
 // Test scalar memref.
-// CHECK: llvm.mlir.global external @gv3(1.000000e+00 : f32) : !llvm.float
+// CHECK: llvm.mlir.global external @gv3(1.000000e+00 : f32) : f32
 global_memref @gv3 : memref<f32> = dense<1.0>
 
 // CHECK-LABEL: func @get_gv3_memref
 func @get_gv3_memref() {
-  // CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv3 : !llvm.ptr<float>
+  // CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv3 : !llvm.ptr<f32>
   // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
-  // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]]] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+  // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
   // CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : i64
-  // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr<float>
-  // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-  // CHECK: llvm.insertvalue %[[DEADBEEFPTR]], {{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-  // CHECK: llvm.insertvalue %[[GEP]], {{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
+  // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr<f32>
+  // CHECK: llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+  // CHECK: llvm.insertvalue %[[DEADBEEFPTR]], {{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+  // CHECK: llvm.insertvalue %[[GEP]], {{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
   // CHECK: %[[OFFSET:.*]] = llvm.mlir.constant(0 : index) : i64
-  // CHECK: llvm.insertvalue %[[OFFSET]], {{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
+  // CHECK: llvm.insertvalue %[[OFFSET]], {{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
   %0 = get_global_memref @gv3 : memref<f32>
   return
 }
@@ -217,9 +217,9 @@ func private @zero_result_func()
 // -----
 
 // CHECK-LABEL: func @powf(
-// CHECK-SAME: !llvm.double
+// CHECK-SAME: f64
 func @powf(%arg0 : f64) {
-  // CHECK: %[[POWF:.*]] = "llvm.intr.pow"(%arg0, %arg0) : (!llvm.double, !llvm.double) -> !llvm.double
+  // CHECK: %[[POWF:.*]] = "llvm.intr.pow"(%arg0, %arg0) : (f64, f64) -> f64
   %0 = std.powf %arg0, %arg0 : f64
   std.return
 }

diff  --git a/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
index 973361899758..d15499ec3871 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
@@ -32,14 +32,14 @@ func @genbool_var_1d(%arg0: index) -> vector<11xi1> {
 // CMP32: %[[A:.*]] = llvm.add %{{.*}}, %[[C]] : !llvm.vec<16 x i32>
 // CMP32: %[[M:.*]] = llvm.icmp "slt" %[[A]], %{{.*}} : !llvm.vec<16 x i32>
 // CMP32: %[[L:.*]] = llvm.intr.masked.load %{{.*}}, %[[M]], %{{.*}}
-// CMP32: llvm.return %[[L]] : !llvm.vec<16 x float>
+// CMP32: llvm.return %[[L]] : !llvm.vec<16 x f32>
 
 // CMP64-LABEL: llvm.func @transfer_read_1d
 // CMP64: %[[C:.*]] = llvm.mlir.constant(dense<[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]> : vector<16xi64>) : !llvm.vec<16 x i64>
 // CMP64: %[[A:.*]] = llvm.add %{{.*}}, %[[C]] : !llvm.vec<16 x i64>
 // CMP64: %[[M:.*]] = llvm.icmp "slt" %[[A]], %{{.*}} : !llvm.vec<16 x i64>
 // CMP64: %[[L:.*]] = llvm.intr.masked.load %{{.*}}, %[[M]], %{{.*}}
-// CMP64: llvm.return %[[L]] : !llvm.vec<16 x float>
+// CMP64: llvm.return %[[L]] : !llvm.vec<16 x f32>
 
 func @transfer_read_1d(%A : memref<?xf32>, %i: index) -> vector<16xf32> {
   %d = constant -1.0: f32

diff  --git a/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir
index 70b0a8fadfee..e35f6f8f0a4b 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-reduction-to-llvm.mlir
@@ -3,18 +3,18 @@
 
 //
 // CHECK-LABEL: llvm.func @reduce_add_f32(
-// CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x float>)
-//      CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : !llvm.float
+// CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x f32>)
+//      CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : f32
 //      CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]])
-// CHECK-SAME: {reassoc = false} : (!llvm.float, !llvm.vec<16 x float>) -> !llvm.float
-//      CHECK: llvm.return %[[V]] : !llvm.float
+// CHECK-SAME: {reassoc = false} : (f32, !llvm.vec<16 x f32>) -> f32
+//      CHECK: llvm.return %[[V]] : f32
 //
 // REASSOC-LABEL: llvm.func @reduce_add_f32(
-// REASSOC-SAME: %[[A:.*]]: !llvm.vec<16 x float>)
-//      REASSOC: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : !llvm.float
+// REASSOC-SAME: %[[A:.*]]: !llvm.vec<16 x f32>)
+//      REASSOC: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : f32
 //      REASSOC: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]])
-// REASSOC-SAME: {reassoc = true} : (!llvm.float, !llvm.vec<16 x float>) -> !llvm.float
-//      REASSOC: llvm.return %[[V]] : !llvm.float
+// REASSOC-SAME: {reassoc = true} : (f32, !llvm.vec<16 x f32>) -> f32
+//      REASSOC: llvm.return %[[V]] : f32
 //
 func @reduce_add_f32(%arg0: vector<16xf32>) -> f32 {
   %0 = vector.reduction "add", %arg0 : vector<16xf32> into f32
@@ -23,18 +23,18 @@ func @reduce_add_f32(%arg0: vector<16xf32>) -> f32 {
 
 //
 // CHECK-LABEL: llvm.func @reduce_mul_f32(
-// CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x float>)
-//      CHECK: %[[C:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float
+// CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x f32>)
+//      CHECK: %[[C:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : f32
 //      CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fmul"(%[[C]], %[[A]])
-// CHECK-SAME: {reassoc = false} : (!llvm.float, !llvm.vec<16 x float>) -> !llvm.float
-//      CHECK: llvm.return %[[V]] : !llvm.float
+// CHECK-SAME: {reassoc = false} : (f32, !llvm.vec<16 x f32>) -> f32
+//      CHECK: llvm.return %[[V]] : f32
 //
 // REASSOC-LABEL: llvm.func @reduce_mul_f32(
-// REASSOC-SAME: %[[A:.*]]: !llvm.vec<16 x float>)
-//      REASSOC: %[[C:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float
+// REASSOC-SAME: %[[A:.*]]: !llvm.vec<16 x f32>)
+//      REASSOC: %[[C:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : f32
 //      REASSOC: %[[V:.*]] = "llvm.intr.vector.reduce.fmul"(%[[C]], %[[A]])
-// REASSOC-SAME: {reassoc = true} : (!llvm.float, !llvm.vec<16 x float>) -> !llvm.float
-//      REASSOC: llvm.return %[[V]] : !llvm.float
+// REASSOC-SAME: {reassoc = true} : (f32, !llvm.vec<16 x f32>) -> f32
+//      REASSOC: llvm.return %[[V]] : f32
 //
 func @reduce_mul_f32(%arg0: vector<16xf32>) -> f32 {
   %0 = vector.reduction "mul", %arg0 : vector<16xf32> into f32

diff  --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
index 0a2d94fcf702..c1e4e088412d 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
@@ -5,305 +5,305 @@ func @broadcast_vec1d_from_scalar(%arg0: f32) -> vector<2xf32> {
   return %0 : vector<2xf32>
 }
 // CHECK-LABEL: llvm.func @broadcast_vec1d_from_scalar(
-// CHECK-SAME:  %[[A:.*]]: !llvm.float)
-// CHECK:       %[[T0:.*]] = llvm.mlir.undef : !llvm.vec<2 x float>
+// CHECK-SAME:  %[[A:.*]]: f32)
+// CHECK:       %[[T0:.*]] = llvm.mlir.undef : !llvm.vec<2 x f32>
 // CHECK:       %[[T1:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK:       %[[T2:.*]] = llvm.insertelement %[[A]], %[[T0]][%[[T1]] : i32] : !llvm.vec<2 x float>
-// CHECK:       %[[T3:.*]] = llvm.shufflevector %[[T2]], %[[T0]] [0 : i32, 0 : i32] : !llvm.vec<2 x float>, !llvm.vec<2 x float>
-// CHECK:       llvm.return %[[T3]] : !llvm.vec<2 x float>
+// CHECK:       %[[T2:.*]] = llvm.insertelement %[[A]], %[[T0]][%[[T1]] : i32] : !llvm.vec<2 x f32>
+// CHECK:       %[[T3:.*]] = llvm.shufflevector %[[T2]], %[[T0]] [0 : i32, 0 : i32] : !llvm.vec<2 x f32>, !llvm.vec<2 x f32>
+// CHECK:       llvm.return %[[T3]] : !llvm.vec<2 x f32>
 
 func @broadcast_vec2d_from_scalar(%arg0: f32) -> vector<2x3xf32> {
   %0 = vector.broadcast %arg0 : f32 to vector<2x3xf32>
   return %0 : vector<2x3xf32>
 }
 // CHECK-LABEL: llvm.func @broadcast_vec2d_from_scalar(
-// CHECK-SAME:  %[[A:.*]]: !llvm.float)
-// CHECK:       %[[T0:.*]] = llvm.mlir.undef : !llvm.array<2 x vec<3 x float>>
-// CHECK:       %[[T1:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
+// CHECK-SAME:  %[[A:.*]]: f32)
+// CHECK:       %[[T0:.*]] = llvm.mlir.undef : !llvm.array<2 x vec<3 x f32>>
+// CHECK:       %[[T1:.*]] = llvm.mlir.undef : !llvm.vec<3 x f32>
 // CHECK:       %[[T2:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK:       %[[T3:.*]] = llvm.insertelement %[[A]], %[[T1]][%[[T2]] : i32] : !llvm.vec<3 x float>
-// CHECK:       %[[T4:.*]] = llvm.shufflevector %[[T3]], %[[T3]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
-// CHECK:       %[[T5:.*]] = llvm.insertvalue %[[T4]], %[[T0]][0] : !llvm.array<2 x vec<3 x float>>
-// CHECK:       %[[T6:.*]] = llvm.insertvalue %[[T4]], %[[T5]][1] : !llvm.array<2 x vec<3 x float>>
-// CHECK:       llvm.return %[[T6]] : !llvm.array<2 x vec<3 x float>>
+// CHECK:       %[[T3:.*]] = llvm.insertelement %[[A]], %[[T1]][%[[T2]] : i32] : !llvm.vec<3 x f32>
+// CHECK:       %[[T4:.*]] = llvm.shufflevector %[[T3]], %[[T3]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x f32>, !llvm.vec<3 x f32>
+// CHECK:       %[[T5:.*]] = llvm.insertvalue %[[T4]], %[[T0]][0] : !llvm.array<2 x vec<3 x f32>>
+// CHECK:       %[[T6:.*]] = llvm.insertvalue %[[T4]], %[[T5]][1] : !llvm.array<2 x vec<3 x f32>>
+// CHECK:       llvm.return %[[T6]] : !llvm.array<2 x vec<3 x f32>>
 
 func @broadcast_vec3d_from_scalar(%arg0: f32) -> vector<2x3x4xf32> {
   %0 = vector.broadcast %arg0 : f32 to vector<2x3x4xf32>
   return %0 : vector<2x3x4xf32>
 }
 // CHECK-LABEL: llvm.func @broadcast_vec3d_from_scalar(
-// CHECK-SAME:  %[[A:.*]]: !llvm.float)
-// CHECK:       %[[T0:.*]] = llvm.mlir.undef : !llvm.array<2 x array<3 x vec<4 x float>>>
-// CHECK:       %[[T1:.*]] = llvm.mlir.undef : !llvm.vec<4 x float>
+// CHECK-SAME:  %[[A:.*]]: f32)
+// CHECK:       %[[T0:.*]] = llvm.mlir.undef : !llvm.array<2 x array<3 x vec<4 x f32>>>
+// CHECK:       %[[T1:.*]] = llvm.mlir.undef : !llvm.vec<4 x f32>
 // CHECK:       %[[T2:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK:       %[[T3:.*]] = llvm.insertelement %[[A]], %[[T1]][%[[T2]] : i32] : !llvm.vec<4 x float>
-// CHECK:       %[[T4:.*]] = llvm.shufflevector %[[T3]], %[[T3]] [0 : i32, 0 : i32, 0 : i32, 0 : i32] : !llvm.vec<4 x float>, !llvm.vec<4 x float>
-// CHECK:       %[[T5:.*]] = llvm.insertvalue %[[T4]], %[[T0]][0, 0] : !llvm.array<2 x array<3 x vec<4 x float>>>
-// CHECK:       %[[T6:.*]] = llvm.insertvalue %[[T4]], %[[T5]][0, 1] : !llvm.array<2 x array<3 x vec<4 x float>>>
-// CHECK:       %[[T7:.*]] = llvm.insertvalue %[[T4]], %[[T6]][0, 2] : !llvm.array<2 x array<3 x vec<4 x float>>>
-// CHECK:       %[[T8:.*]] = llvm.insertvalue %[[T4]], %[[T7]][1, 0] : !llvm.array<2 x array<3 x vec<4 x float>>>
-// CHECK:       %[[T9:.*]] = llvm.insertvalue %[[T4]], %[[T8]][1, 1] : !llvm.array<2 x array<3 x vec<4 x float>>>
-// CHECK:       %[[T10:.*]] = llvm.insertvalue %[[T4]], %[[T9]][1, 2] : !llvm.array<2 x array<3 x vec<4 x float>>>
-// CHECK:       llvm.return %[[T10]] : !llvm.array<2 x array<3 x vec<4 x float>>>
+// CHECK:       %[[T3:.*]] = llvm.insertelement %[[A]], %[[T1]][%[[T2]] : i32] : !llvm.vec<4 x f32>
+// CHECK:       %[[T4:.*]] = llvm.shufflevector %[[T3]], %[[T3]] [0 : i32, 0 : i32, 0 : i32, 0 : i32] : !llvm.vec<4 x f32>, !llvm.vec<4 x f32>
+// CHECK:       %[[T5:.*]] = llvm.insertvalue %[[T4]], %[[T0]][0, 0] : !llvm.array<2 x array<3 x vec<4 x f32>>>
+// CHECK:       %[[T6:.*]] = llvm.insertvalue %[[T4]], %[[T5]][0, 1] : !llvm.array<2 x array<3 x vec<4 x f32>>>
+// CHECK:       %[[T7:.*]] = llvm.insertvalue %[[T4]], %[[T6]][0, 2] : !llvm.array<2 x array<3 x vec<4 x f32>>>
+// CHECK:       %[[T8:.*]] = llvm.insertvalue %[[T4]], %[[T7]][1, 0] : !llvm.array<2 x array<3 x vec<4 x f32>>>
+// CHECK:       %[[T9:.*]] = llvm.insertvalue %[[T4]], %[[T8]][1, 1] : !llvm.array<2 x array<3 x vec<4 x f32>>>
+// CHECK:       %[[T10:.*]] = llvm.insertvalue %[[T4]], %[[T9]][1, 2] : !llvm.array<2 x array<3 x vec<4 x f32>>>
+// CHECK:       llvm.return %[[T10]] : !llvm.array<2 x array<3 x vec<4 x f32>>>
 
 func @broadcast_vec1d_from_vec1d(%arg0: vector<2xf32>) -> vector<2xf32> {
   %0 = vector.broadcast %arg0 : vector<2xf32> to vector<2xf32>
   return %0 : vector<2xf32>
 }
 // CHECK-LABEL: llvm.func @broadcast_vec1d_from_vec1d(
-// CHECK-SAME:  %[[A:.*]]: !llvm.vec<2 x float>)
-// CHECK:       llvm.return %[[A]] : !llvm.vec<2 x float>
+// CHECK-SAME:  %[[A:.*]]: !llvm.vec<2 x f32>)
+// CHECK:       llvm.return %[[A]] : !llvm.vec<2 x f32>
 
 func @broadcast_vec2d_from_vec1d(%arg0: vector<2xf32>) -> vector<3x2xf32> {
   %0 = vector.broadcast %arg0 : vector<2xf32> to vector<3x2xf32>
   return %0 : vector<3x2xf32>
 }
 // CHECK-LABEL: llvm.func @broadcast_vec2d_from_vec1d(
-// CHECK-SAME:  %[[A:.*]]: !llvm.vec<2 x float>)
-// CHECK:       %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<3x2xf32>) : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T1:.*]] = llvm.insertvalue %[[A]], %[[T0]][0] : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T2:.*]] = llvm.insertvalue %[[A]], %[[T1]][1] : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T3:.*]] = llvm.insertvalue %[[A]], %[[T2]][2] : !llvm.array<3 x vec<2 x float>>
-// CHECK:       llvm.return %[[T3]] : !llvm.array<3 x vec<2 x float>>
+// CHECK-SAME:  %[[A:.*]]: !llvm.vec<2 x f32>)
+// CHECK:       %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<3x2xf32>) : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T1:.*]] = llvm.insertvalue %[[A]], %[[T0]][0] : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T2:.*]] = llvm.insertvalue %[[A]], %[[T1]][1] : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T3:.*]] = llvm.insertvalue %[[A]], %[[T2]][2] : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       llvm.return %[[T3]] : !llvm.array<3 x vec<2 x f32>>
 
 func @broadcast_vec3d_from_vec1d(%arg0: vector<2xf32>) -> vector<4x3x2xf32> {
   %0 = vector.broadcast %arg0 : vector<2xf32> to vector<4x3x2xf32>
   return %0 : vector<4x3x2xf32>
 }
 // CHECK-LABEL: llvm.func @broadcast_vec3d_from_vec1d(
-// CHECK-SAME:  %[[A:.*]]: !llvm.vec<2 x float>)
-// CHECK:       %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<3x2xf32>) : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T1:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<4x3x2xf32>) : !llvm.array<4 x array<3 x vec<2 x float>>>
-// CHECK:       %[[T2:.*]] = llvm.insertvalue %[[A]], %[[T0]][0] : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T3:.*]] = llvm.insertvalue %[[A]], %[[T2]][1] : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T4:.*]] = llvm.insertvalue %[[A]], %[[T3]][2] : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T5:.*]] = llvm.insertvalue %[[T4]], %[[T1]][0] : !llvm.array<4 x array<3 x vec<2 x float>>>
-// CHECK:       %[[T6:.*]] = llvm.insertvalue %[[T4]], %[[T5]][1] : !llvm.array<4 x array<3 x vec<2 x float>>>
-// CHECK:       %[[T7:.*]] = llvm.insertvalue %[[T4]], %[[T6]][2] : !llvm.array<4 x array<3 x vec<2 x float>>>
-// CHECK:       %[[T8:.*]] = llvm.insertvalue %[[T4]], %[[T7]][3] : !llvm.array<4 x array<3 x vec<2 x float>>>
-// CHECK:       llvm.return %[[T8]] : !llvm.array<4 x array<3 x vec<2 x float>>>
+// CHECK-SAME:  %[[A:.*]]: !llvm.vec<2 x f32>)
+// CHECK:       %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<3x2xf32>) : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T1:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<4x3x2xf32>) : !llvm.array<4 x array<3 x vec<2 x f32>>>
+// CHECK:       %[[T2:.*]] = llvm.insertvalue %[[A]], %[[T0]][0] : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T3:.*]] = llvm.insertvalue %[[A]], %[[T2]][1] : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T4:.*]] = llvm.insertvalue %[[A]], %[[T3]][2] : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T5:.*]] = llvm.insertvalue %[[T4]], %[[T1]][0] : !llvm.array<4 x array<3 x vec<2 x f32>>>
+// CHECK:       %[[T6:.*]] = llvm.insertvalue %[[T4]], %[[T5]][1] : !llvm.array<4 x array<3 x vec<2 x f32>>>
+// CHECK:       %[[T7:.*]] = llvm.insertvalue %[[T4]], %[[T6]][2] : !llvm.array<4 x array<3 x vec<2 x f32>>>
+// CHECK:       %[[T8:.*]] = llvm.insertvalue %[[T4]], %[[T7]][3] : !llvm.array<4 x array<3 x vec<2 x f32>>>
+// CHECK:       llvm.return %[[T8]] : !llvm.array<4 x array<3 x vec<2 x f32>>>
 
 func @broadcast_vec3d_from_vec2d(%arg0: vector<3x2xf32>) -> vector<4x3x2xf32> {
   %0 = vector.broadcast %arg0 : vector<3x2xf32> to vector<4x3x2xf32>
   return %0 : vector<4x3x2xf32>
 }
 // CHECK-LABEL: llvm.func @broadcast_vec3d_from_vec2d(
-// CHECK-SAME:  %[[A:.*]]: !llvm.array<3 x vec<2 x float>>)
-// CHECK:       %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<4x3x2xf32>) : !llvm.array<4 x array<3 x vec<2 x float>>>
-// CHECK:       %[[T1:.*]] = llvm.insertvalue %[[A]], %[[T0]][0] : !llvm.array<4 x array<3 x vec<2 x float>>>
-// CHECK:       %[[T2:.*]] = llvm.insertvalue %[[A]], %[[T1]][1] : !llvm.array<4 x array<3 x vec<2 x float>>>
-// CHECK:       %[[T3:.*]] = llvm.insertvalue %[[A]], %[[T2]][2] : !llvm.array<4 x array<3 x vec<2 x float>>>
-// CHECK:       %[[T4:.*]] = llvm.insertvalue %[[A]], %[[T3]][3] : !llvm.array<4 x array<3 x vec<2 x float>>>
-// CHECK:       llvm.return %[[T4]] : !llvm.array<4 x array<3 x vec<2 x float>>>
+// CHECK-SAME:  %[[A:.*]]: !llvm.array<3 x vec<2 x f32>>)
+// CHECK:       %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<4x3x2xf32>) : !llvm.array<4 x array<3 x vec<2 x f32>>>
+// CHECK:       %[[T1:.*]] = llvm.insertvalue %[[A]], %[[T0]][0] : !llvm.array<4 x array<3 x vec<2 x f32>>>
+// CHECK:       %[[T2:.*]] = llvm.insertvalue %[[A]], %[[T1]][1] : !llvm.array<4 x array<3 x vec<2 x f32>>>
+// CHECK:       %[[T3:.*]] = llvm.insertvalue %[[A]], %[[T2]][2] : !llvm.array<4 x array<3 x vec<2 x f32>>>
+// CHECK:       %[[T4:.*]] = llvm.insertvalue %[[A]], %[[T3]][3] : !llvm.array<4 x array<3 x vec<2 x f32>>>
+// CHECK:       llvm.return %[[T4]] : !llvm.array<4 x array<3 x vec<2 x f32>>>
 
 func @broadcast_stretch(%arg0: vector<1xf32>) -> vector<4xf32> {
   %0 = vector.broadcast %arg0 : vector<1xf32> to vector<4xf32>
   return %0 : vector<4xf32>
 }
 // CHECK-LABEL: llvm.func @broadcast_stretch(
-// CHECK-SAME:  %[[A:.*]]: !llvm.vec<1 x float>)
+// CHECK-SAME:  %[[A:.*]]: !llvm.vec<1 x f32>)
 // CHECK:       %[[T0:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK:       %[[T1:.*]] = llvm.extractelement %[[A]][%[[T0]] : i64] : !llvm.vec<1 x float>
-// CHECK:       %[[T2:.*]] = llvm.mlir.undef : !llvm.vec<4 x float>
+// CHECK:       %[[T1:.*]] = llvm.extractelement %[[A]][%[[T0]] : i64] : !llvm.vec<1 x f32>
+// CHECK:       %[[T2:.*]] = llvm.mlir.undef : !llvm.vec<4 x f32>
 // CHECK:       %[[T3:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK:       %[[T4:.*]] = llvm.insertelement %[[T1]], %[[T2]][%3 : i32] : !llvm.vec<4 x float>
-// CHECK:       %[[T5:.*]] = llvm.shufflevector %[[T4]], %[[T2]] [0 : i32, 0 : i32, 0 : i32, 0 : i32] : !llvm.vec<4 x float>, !llvm.vec<4 x float>
-// CHECK:       llvm.return %[[T5]] : !llvm.vec<4 x float>
+// CHECK:       %[[T4:.*]] = llvm.insertelement %[[T1]], %[[T2]][%3 : i32] : !llvm.vec<4 x f32>
+// CHECK:       %[[T5:.*]] = llvm.shufflevector %[[T4]], %[[T2]] [0 : i32, 0 : i32, 0 : i32, 0 : i32] : !llvm.vec<4 x f32>, !llvm.vec<4 x f32>
+// CHECK:       llvm.return %[[T5]] : !llvm.vec<4 x f32>
 
 func @broadcast_stretch_at_start(%arg0: vector<1x4xf32>) -> vector<3x4xf32> {
   %0 = vector.broadcast %arg0 : vector<1x4xf32> to vector<3x4xf32>
   return %0 : vector<3x4xf32>
 }
 // CHECK-LABEL: llvm.func @broadcast_stretch_at_start(
-// CHECK-SAME:  %[[A:.*]]: !llvm.array<1 x vec<4 x float>>)
-// CHECK:       %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<3x4xf32>) : !llvm.array<3 x vec<4 x float>>
-// CHECK:       %[[T1:.*]] = llvm.extractvalue %[[A]][0] : !llvm.array<1 x vec<4 x float>>
-// CHECK:       %[[T2:.*]] = llvm.insertvalue %[[T1]], %[[T0]][0] : !llvm.array<3 x vec<4 x float>>
-// CHECK:       %[[T3:.*]] = llvm.insertvalue %[[T1]], %[[T2]][1] : !llvm.array<3 x vec<4 x float>>
-// CHECK:       %[[T4:.*]] = llvm.insertvalue %[[T1]], %[[T3]][2] : !llvm.array<3 x vec<4 x float>>
-// CHECK:       llvm.return %[[T4]] : !llvm.array<3 x vec<4 x float>>
+// CHECK-SAME:  %[[A:.*]]: !llvm.array<1 x vec<4 x f32>>)
+// CHECK:       %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<3x4xf32>) : !llvm.array<3 x vec<4 x f32>>
+// CHECK:       %[[T1:.*]] = llvm.extractvalue %[[A]][0] : !llvm.array<1 x vec<4 x f32>>
+// CHECK:       %[[T2:.*]] = llvm.insertvalue %[[T1]], %[[T0]][0] : !llvm.array<3 x vec<4 x f32>>
+// CHECK:       %[[T3:.*]] = llvm.insertvalue %[[T1]], %[[T2]][1] : !llvm.array<3 x vec<4 x f32>>
+// CHECK:       %[[T4:.*]] = llvm.insertvalue %[[T1]], %[[T3]][2] : !llvm.array<3 x vec<4 x f32>>
+// CHECK:       llvm.return %[[T4]] : !llvm.array<3 x vec<4 x f32>>
 
 func @broadcast_stretch_at_end(%arg0: vector<4x1xf32>) -> vector<4x3xf32> {
   %0 = vector.broadcast %arg0 : vector<4x1xf32> to vector<4x3xf32>
   return %0 : vector<4x3xf32>
 }
 // CHECK-LABEL: llvm.func @broadcast_stretch_at_end(
-// CHECK-SAME:  %[[A:.*]]: !llvm.array<4 x vec<1 x float>>)
-// CHECK:       %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<4x3xf32>) : !llvm.array<4 x vec<3 x float>>
-// CHECK:       %[[T1:.*]] = llvm.extractvalue %[[A]][0] : !llvm.array<4 x vec<1 x float>>
+// CHECK-SAME:  %[[A:.*]]: !llvm.array<4 x vec<1 x f32>>)
+// CHECK:       %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<4x3xf32>) : !llvm.array<4 x vec<3 x f32>>
+// CHECK:       %[[T1:.*]] = llvm.extractvalue %[[A]][0] : !llvm.array<4 x vec<1 x f32>>
 // CHECK:       %[[T2:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK:       %[[T3:.*]] = llvm.extractelement %[[T1]][%[[T2]] : i64] : !llvm.vec<1 x float>
-// CHECK:       %[[T4:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
+// CHECK:       %[[T3:.*]] = llvm.extractelement %[[T1]][%[[T2]] : i64] : !llvm.vec<1 x f32>
+// CHECK:       %[[T4:.*]] = llvm.mlir.undef : !llvm.vec<3 x f32>
 // CHECK:       %[[T5:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK:       %[[T6:.*]] = llvm.insertelement %[[T3]], %[[T4]][%[[T5]] : i32] : !llvm.vec<3 x float>
-// CHECK:       %[[T7:.*]] = llvm.shufflevector %[[T6]], %[[T4]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
-// CHECK:       %[[T8:.*]] = llvm.insertvalue %[[T7]], %[[T0]][0] : !llvm.array<4 x vec<3 x float>>
-// CHECK:       %[[T9:.*]] = llvm.extractvalue %[[A]][1] : !llvm.array<4 x vec<1 x float>>
+// CHECK:       %[[T6:.*]] = llvm.insertelement %[[T3]], %[[T4]][%[[T5]] : i32] : !llvm.vec<3 x f32>
+// CHECK:       %[[T7:.*]] = llvm.shufflevector %[[T6]], %[[T4]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x f32>, !llvm.vec<3 x f32>
+// CHECK:       %[[T8:.*]] = llvm.insertvalue %[[T7]], %[[T0]][0] : !llvm.array<4 x vec<3 x f32>>
+// CHECK:       %[[T9:.*]] = llvm.extractvalue %[[A]][1] : !llvm.array<4 x vec<1 x f32>>
 // CHECK:       %[[T10:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK:       %[[T11:.*]] = llvm.extractelement %[[T9]][%[[T10]] : i64] : !llvm.vec<1 x float>
-// CHECK:       %[[T12:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
+// CHECK:       %[[T11:.*]] = llvm.extractelement %[[T9]][%[[T10]] : i64] : !llvm.vec<1 x f32>
+// CHECK:       %[[T12:.*]] = llvm.mlir.undef : !llvm.vec<3 x f32>
 // CHECK:       %[[T13:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK:       %[[T14:.*]] = llvm.insertelement %[[T11]], %[[T12]][%[[T13]] : i32] : !llvm.vec<3 x float>
-// CHECK:       %[[T15:.*]] = llvm.shufflevector %[[T14]], %[[T12]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
-// CHECK:       %[[T16:.*]] = llvm.insertvalue %[[T15]], %[[T8]][1] : !llvm.array<4 x vec<3 x float>>
-// CHECK:       %[[T17:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<4 x vec<1 x float>>
+// CHECK:       %[[T14:.*]] = llvm.insertelement %[[T11]], %[[T12]][%[[T13]] : i32] : !llvm.vec<3 x f32>
+// CHECK:       %[[T15:.*]] = llvm.shufflevector %[[T14]], %[[T12]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x f32>, !llvm.vec<3 x f32>
+// CHECK:       %[[T16:.*]] = llvm.insertvalue %[[T15]], %[[T8]][1] : !llvm.array<4 x vec<3 x f32>>
+// CHECK:       %[[T17:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<4 x vec<1 x f32>>
 // CHECK:       %[[T18:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK:       %[[T19:.*]] = llvm.extractelement %[[T17]][%[[T18]] : i64] : !llvm.vec<1 x float>
-// CHECK:       %[[T20:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
+// CHECK:       %[[T19:.*]] = llvm.extractelement %[[T17]][%[[T18]] : i64] : !llvm.vec<1 x f32>
+// CHECK:       %[[T20:.*]] = llvm.mlir.undef : !llvm.vec<3 x f32>
 // CHECK:       %[[T21:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK:       %[[T22:.*]] = llvm.insertelement %[[T19]], %[[T20]][%[[T21]] : i32] : !llvm.vec<3 x float>
-// CHECK:       %[[T23:.*]] = llvm.shufflevector %[[T22]], %[[T20]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
-// CHECK:       %[[T24:.*]] = llvm.insertvalue %[[T23]], %[[T16]][2] : !llvm.array<4 x vec<3 x float>>
-// CHECK:       %[[T25:.*]] = llvm.extractvalue %[[A]][3] : !llvm.array<4 x vec<1 x float>>
+// CHECK:       %[[T22:.*]] = llvm.insertelement %[[T19]], %[[T20]][%[[T21]] : i32] : !llvm.vec<3 x f32>
+// CHECK:       %[[T23:.*]] = llvm.shufflevector %[[T22]], %[[T20]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x f32>, !llvm.vec<3 x f32>
+// CHECK:       %[[T24:.*]] = llvm.insertvalue %[[T23]], %[[T16]][2] : !llvm.array<4 x vec<3 x f32>>
+// CHECK:       %[[T25:.*]] = llvm.extractvalue %[[A]][3] : !llvm.array<4 x vec<1 x f32>>
 // CHECK:       %[[T26:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK:       %[[T27:.*]] = llvm.extractelement %[[T25]][%[[T26]] : i64] : !llvm.vec<1 x float>
-// CHECK:       %[[T28:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
+// CHECK:       %[[T27:.*]] = llvm.extractelement %[[T25]][%[[T26]] : i64] : !llvm.vec<1 x f32>
+// CHECK:       %[[T28:.*]] = llvm.mlir.undef : !llvm.vec<3 x f32>
 // CHECK:       %[[T29:.*]] = llvm.mlir.constant(0 : i32) : i32
-// CHECK:       %[[T30:.*]] = llvm.insertelement %[[T27]], %[[T28]][%[[T29]] : i32] : !llvm.vec<3 x float>
-// CHECK:       %[[T31:.*]] = llvm.shufflevector %[[T30]], %[[T28]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
-// CHECK:       %[[T32:.*]] = llvm.insertvalue %[[T31]], %[[T24]][3] : !llvm.array<4 x vec<3 x float>>
-// CHECK:       llvm.return %[[T32]] : !llvm.array<4 x vec<3 x float>>
+// CHECK:       %[[T30:.*]] = llvm.insertelement %[[T27]], %[[T28]][%[[T29]] : i32] : !llvm.vec<3 x f32>
+// CHECK:       %[[T31:.*]] = llvm.shufflevector %[[T30]], %[[T28]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x f32>, !llvm.vec<3 x f32>
+// CHECK:       %[[T32:.*]] = llvm.insertvalue %[[T31]], %[[T24]][3] : !llvm.array<4 x vec<3 x f32>>
+// CHECK:       llvm.return %[[T32]] : !llvm.array<4 x vec<3 x f32>>
 
 func @broadcast_stretch_in_middle(%arg0: vector<4x1x2xf32>) -> vector<4x3x2xf32> {
   %0 = vector.broadcast %arg0 : vector<4x1x2xf32> to vector<4x3x2xf32>
   return %0 : vector<4x3x2xf32>
 }
 // CHECK-LABEL: llvm.func @broadcast_stretch_in_middle(
-// CHECK-SAME:  %[[A:.*]]: !llvm.array<4 x array<1 x vec<2 x float>>>)
-// CHECK:       %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<4x3x2xf32>) : !llvm.array<4 x array<3 x vec<2 x float>>>
-// CHECK:       %[[T1:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<3x2xf32>) : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T2:.*]] = llvm.extractvalue %[[A]][0, 0] : !llvm.array<4 x array<1 x vec<2 x float>>>
-// CHECK:       %[[T4:.*]] = llvm.insertvalue %[[T2]], %[[T1]][0] : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T5:.*]] = llvm.insertvalue %[[T2]], %[[T4]][1] : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T6:.*]] = llvm.insertvalue %[[T2]], %[[T5]][2] : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T7:.*]] = llvm.insertvalue %[[T6]], %[[T0]][0] : !llvm.array<4 x array<3 x vec<2 x float>>>
-// CHECK:       %[[T8:.*]] = llvm.extractvalue %[[A]][1, 0] : !llvm.array<4 x array<1 x vec<2 x float>>>
-// CHECK:       %[[T10:.*]] = llvm.insertvalue %[[T8]], %[[T1]][0] : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T11:.*]] = llvm.insertvalue %[[T8]], %[[T10]][1] : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T12:.*]] = llvm.insertvalue %[[T8]], %[[T11]][2] : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T13:.*]] = llvm.insertvalue %[[T12]], %[[T7]][1] : !llvm.array<4 x array<3 x vec<2 x float>>>
-// CHECK:       %[[T14:.*]] = llvm.extractvalue %[[A]][2, 0] : !llvm.array<4 x array<1 x vec<2 x float>>>
-// CHECK:       %[[T16:.*]] = llvm.insertvalue %[[T14]], %[[T1]][0] : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T17:.*]] = llvm.insertvalue %[[T14]], %[[T16]][1] : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T18:.*]] = llvm.insertvalue %[[T14]], %[[T17]][2] : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T19:.*]] = llvm.insertvalue %[[T18]], %[[T13]][2] : !llvm.array<4 x array<3 x vec<2 x float>>>
-// CHECK:       %[[T20:.*]] = llvm.extractvalue %[[A]][3, 0] : !llvm.array<4 x array<1 x vec<2 x float>>>
-// CHECK:       %[[T22:.*]] = llvm.insertvalue %[[T20]], %[[T1]][0] : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T23:.*]] = llvm.insertvalue %[[T20]], %[[T22]][1] : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T24:.*]] = llvm.insertvalue %[[T20]], %[[T23]][2] : !llvm.array<3 x vec<2 x float>>
-// CHECK:       %[[T25:.*]] = llvm.insertvalue %[[T24]], %[[T19]][3] : !llvm.array<4 x array<3 x vec<2 x float>>>
-// CHECK:       llvm.return %[[T25]] : !llvm.array<4 x array<3 x vec<2 x float>>>
+// CHECK-SAME:  %[[A:.*]]: !llvm.array<4 x array<1 x vec<2 x f32>>>)
+// CHECK:       %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<4x3x2xf32>) : !llvm.array<4 x array<3 x vec<2 x f32>>>
+// CHECK:       %[[T1:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<3x2xf32>) : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T2:.*]] = llvm.extractvalue %[[A]][0, 0] : !llvm.array<4 x array<1 x vec<2 x f32>>>
+// CHECK:       %[[T4:.*]] = llvm.insertvalue %[[T2]], %[[T1]][0] : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T5:.*]] = llvm.insertvalue %[[T2]], %[[T4]][1] : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T6:.*]] = llvm.insertvalue %[[T2]], %[[T5]][2] : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T7:.*]] = llvm.insertvalue %[[T6]], %[[T0]][0] : !llvm.array<4 x array<3 x vec<2 x f32>>>
+// CHECK:       %[[T8:.*]] = llvm.extractvalue %[[A]][1, 0] : !llvm.array<4 x array<1 x vec<2 x f32>>>
+// CHECK:       %[[T10:.*]] = llvm.insertvalue %[[T8]], %[[T1]][0] : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T11:.*]] = llvm.insertvalue %[[T8]], %[[T10]][1] : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T12:.*]] = llvm.insertvalue %[[T8]], %[[T11]][2] : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T13:.*]] = llvm.insertvalue %[[T12]], %[[T7]][1] : !llvm.array<4 x array<3 x vec<2 x f32>>>
+// CHECK:       %[[T14:.*]] = llvm.extractvalue %[[A]][2, 0] : !llvm.array<4 x array<1 x vec<2 x f32>>>
+// CHECK:       %[[T16:.*]] = llvm.insertvalue %[[T14]], %[[T1]][0] : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T17:.*]] = llvm.insertvalue %[[T14]], %[[T16]][1] : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T18:.*]] = llvm.insertvalue %[[T14]], %[[T17]][2] : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T19:.*]] = llvm.insertvalue %[[T18]], %[[T13]][2] : !llvm.array<4 x array<3 x vec<2 x f32>>>
+// CHECK:       %[[T20:.*]] = llvm.extractvalue %[[A]][3, 0] : !llvm.array<4 x array<1 x vec<2 x f32>>>
+// CHECK:       %[[T22:.*]] = llvm.insertvalue %[[T20]], %[[T1]][0] : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T23:.*]] = llvm.insertvalue %[[T20]], %[[T22]][1] : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T24:.*]] = llvm.insertvalue %[[T20]], %[[T23]][2] : !llvm.array<3 x vec<2 x f32>>
+// CHECK:       %[[T25:.*]] = llvm.insertvalue %[[T24]], %[[T19]][3] : !llvm.array<4 x array<3 x vec<2 x f32>>>
+// CHECK:       llvm.return %[[T25]] : !llvm.array<4 x array<3 x vec<2 x f32>>>
 
 func @outerproduct(%arg0: vector<2xf32>, %arg1: vector<3xf32>) -> vector<2x3xf32> {
   %2 = vector.outerproduct %arg0, %arg1 : vector<2xf32>, vector<3xf32>
   return %2 : vector<2x3xf32>
 }
 // CHECK-LABEL: llvm.func @outerproduct(
-// CHECK-SAME: %[[A:.*]]: !llvm.vec<2 x float>,
-// CHECK-SAME: %[[B:.*]]: !llvm.vec<3 x float>)
+// CHECK-SAME: %[[A:.*]]: !llvm.vec<2 x f32>,
+// CHECK-SAME: %[[B:.*]]: !llvm.vec<3 x f32>)
 //      CHECK: %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<2x3xf32>)
 //      CHECK: %[[T1:.*]] = llvm.mlir.constant(0 : i64) : i64
-//      CHECK: %[[T2:.*]] = llvm.extractelement %[[A]][%[[T1]] : i64] : !llvm.vec<2 x float>
-//      CHECK: %[[T3:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
+//      CHECK: %[[T2:.*]] = llvm.extractelement %[[A]][%[[T1]] : i64] : !llvm.vec<2 x f32>
+//      CHECK: %[[T3:.*]] = llvm.mlir.undef : !llvm.vec<3 x f32>
 //      CHECK: %[[T4:.*]] = llvm.mlir.constant(0 : i32) : i32
-//      CHECK: %[[T5:.*]] = llvm.insertelement %[[T2]], %[[T3]][%4 : i32] : !llvm.vec<3 x float>
-//      CHECK: %[[T6:.*]] = llvm.shufflevector %[[T5]], %[[T3]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
-//      CHECK: %[[T7:.*]] = llvm.fmul %[[T6]], %[[B]] : !llvm.vec<3 x float>
-//      CHECK: %[[T8:.*]] = llvm.insertvalue %[[T7]], %[[T0]][0] : !llvm.array<2 x vec<3 x float>>
+//      CHECK: %[[T5:.*]] = llvm.insertelement %[[T2]], %[[T3]][%4 : i32] : !llvm.vec<3 x f32>
+//      CHECK: %[[T6:.*]] = llvm.shufflevector %[[T5]], %[[T3]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x f32>, !llvm.vec<3 x f32>
+//      CHECK: %[[T7:.*]] = llvm.fmul %[[T6]], %[[B]] : !llvm.vec<3 x f32>
+//      CHECK: %[[T8:.*]] = llvm.insertvalue %[[T7]], %[[T0]][0] : !llvm.array<2 x vec<3 x f32>>
 //      CHECK: %[[T9:.*]] = llvm.mlir.constant(1 : i64) : i64
-//      CHECK: %[[T10:.*]] = llvm.extractelement %[[A]][%9 : i64] : !llvm.vec<2 x float>
-//      CHECK: %[[T11:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
+//      CHECK: %[[T10:.*]] = llvm.extractelement %[[A]][%9 : i64] : !llvm.vec<2 x f32>
+//      CHECK: %[[T11:.*]] = llvm.mlir.undef : !llvm.vec<3 x f32>
 //      CHECK: %[[T12:.*]] = llvm.mlir.constant(0 : i32) : i32
-//      CHECK: %[[T13:.*]] = llvm.insertelement %[[T10]], %[[T11]][%12 : i32] : !llvm.vec<3 x float>
-//      CHECK: %[[T14:.*]] = llvm.shufflevector %[[T13]], %[[T11]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
-//      CHECK: %[[T15:.*]] = llvm.fmul %[[T14]], %[[B]] : !llvm.vec<3 x float>
-//      CHECK: %[[T16:.*]] = llvm.insertvalue %[[T15]], %[[T8]][1] : !llvm.array<2 x vec<3 x float>>
-//      CHECK: llvm.return %[[T16]] : !llvm.array<2 x vec<3 x float>>
+//      CHECK: %[[T13:.*]] = llvm.insertelement %[[T10]], %[[T11]][%12 : i32] : !llvm.vec<3 x f32>
+//      CHECK: %[[T14:.*]] = llvm.shufflevector %[[T13]], %[[T11]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x f32>, !llvm.vec<3 x f32>
+//      CHECK: %[[T15:.*]] = llvm.fmul %[[T14]], %[[B]] : !llvm.vec<3 x f32>
+//      CHECK: %[[T16:.*]] = llvm.insertvalue %[[T15]], %[[T8]][1] : !llvm.array<2 x vec<3 x f32>>
+//      CHECK: llvm.return %[[T16]] : !llvm.array<2 x vec<3 x f32>>
 
 func @outerproduct_add(%arg0: vector<2xf32>, %arg1: vector<3xf32>, %arg2: vector<2x3xf32>) -> vector<2x3xf32> {
   %2 = vector.outerproduct %arg0, %arg1, %arg2 : vector<2xf32>, vector<3xf32>
   return %2 : vector<2x3xf32>
 }
 // CHECK-LABEL: llvm.func @outerproduct_add(
-// CHECK-SAME: %[[A:.*]]: !llvm.vec<2 x float>,
-// CHECK-SAME: %[[B:.*]]: !llvm.vec<3 x float>,
-// CHECK-SAME: %[[C:.*]]: !llvm.array<2 x vec<3 x float>>)
+// CHECK-SAME: %[[A:.*]]: !llvm.vec<2 x f32>,
+// CHECK-SAME: %[[B:.*]]: !llvm.vec<3 x f32>,
+// CHECK-SAME: %[[C:.*]]: !llvm.array<2 x vec<3 x f32>>)
 //      CHECK: %[[T0:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<2x3xf32>)
 //      CHECK: %[[T1:.*]] = llvm.mlir.constant(0 : i64) : i64
-//      CHECK: %[[T2:.*]] = llvm.extractelement %[[A]][%[[T1]] : i64] : !llvm.vec<2 x float>
-//      CHECK: %[[T3:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
+//      CHECK: %[[T2:.*]] = llvm.extractelement %[[A]][%[[T1]] : i64] : !llvm.vec<2 x f32>
+//      CHECK: %[[T3:.*]] = llvm.mlir.undef : !llvm.vec<3 x f32>
 //      CHECK: %[[T4:.*]] = llvm.mlir.constant(0 : i32) : i32
-//      CHECK: %[[T5:.*]] = llvm.insertelement %[[T2]], %[[T3]][%[[T4]] : i32] : !llvm.vec<3 x float>
-//      CHECK: %[[T6:.*]] = llvm.shufflevector %[[T5]], %[[T3]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
-//      CHECK: %[[T7:.*]] = llvm.extractvalue %[[C]][0] : !llvm.array<2 x vec<3 x float>>
-//      CHECK: %[[T8:.*]] = "llvm.intr.fmuladd"(%[[T6]], %[[B]], %[[T7]]) : (!llvm.vec<3 x float>, !llvm.vec<3 x float>, !llvm.vec<3 x float>)
-//      CHECK: %[[T9:.*]] = llvm.insertvalue %[[T8]], %[[T0]][0] : !llvm.array<2 x vec<3 x float>>
+//      CHECK: %[[T5:.*]] = llvm.insertelement %[[T2]], %[[T3]][%[[T4]] : i32] : !llvm.vec<3 x f32>
+//      CHECK: %[[T6:.*]] = llvm.shufflevector %[[T5]], %[[T3]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x f32>, !llvm.vec<3 x f32>
+//      CHECK: %[[T7:.*]] = llvm.extractvalue %[[C]][0] : !llvm.array<2 x vec<3 x f32>>
+//      CHECK: %[[T8:.*]] = "llvm.intr.fmuladd"(%[[T6]], %[[B]], %[[T7]]) : (!llvm.vec<3 x f32>, !llvm.vec<3 x f32>, !llvm.vec<3 x f32>)
+//      CHECK: %[[T9:.*]] = llvm.insertvalue %[[T8]], %[[T0]][0] : !llvm.array<2 x vec<3 x f32>>
 //      CHECK: %[[T10:.*]] = llvm.mlir.constant(1 : i64) : i64
-//      CHECK: %[[T11:.*]] = llvm.extractelement %[[A]][%[[T10]] : i64] : !llvm.vec<2 x float>
-//      CHECK: %[[T12:.*]] = llvm.mlir.undef : !llvm.vec<3 x float>
+//      CHECK: %[[T11:.*]] = llvm.extractelement %[[A]][%[[T10]] : i64] : !llvm.vec<2 x f32>
+//      CHECK: %[[T12:.*]] = llvm.mlir.undef : !llvm.vec<3 x f32>
 //      CHECK: %[[T13:.*]] = llvm.mlir.constant(0 : i32) : i32
-//      CHECK: %[[T14:.*]] = llvm.insertelement %[[T11]], %[[T12]][%[[T13]] : i32] : !llvm.vec<3 x float>
-//      CHECK: %[[T15:.*]] = llvm.shufflevector %[[T14]], %[[T12]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
-//      CHECK: %[[T16:.*]] = llvm.extractvalue %[[C]][1] : !llvm.array<2 x vec<3 x float>>
-//      CHECK: %[[T17:.*]] = "llvm.intr.fmuladd"(%[[T15]], %[[B]], %[[T16]]) : (!llvm.vec<3 x float>, !llvm.vec<3 x float>, !llvm.vec<3 x float>)
-//      CHECK: %[[T18:.*]] = llvm.insertvalue %[[T17]], %[[T9]][1] : !llvm.array<2 x vec<3 x float>>
-//      CHECK: llvm.return %[[T18]] : !llvm.array<2 x vec<3 x float>>
+//      CHECK: %[[T14:.*]] = llvm.insertelement %[[T11]], %[[T12]][%[[T13]] : i32] : !llvm.vec<3 x f32>
+//      CHECK: %[[T15:.*]] = llvm.shufflevector %[[T14]], %[[T12]] [0 : i32, 0 : i32, 0 : i32] : !llvm.vec<3 x f32>, !llvm.vec<3 x f32>
+//      CHECK: %[[T16:.*]] = llvm.extractvalue %[[C]][1] : !llvm.array<2 x vec<3 x f32>>
+//      CHECK: %[[T17:.*]] = "llvm.intr.fmuladd"(%[[T15]], %[[B]], %[[T16]]) : (!llvm.vec<3 x f32>, !llvm.vec<3 x f32>, !llvm.vec<3 x f32>)
+//      CHECK: %[[T18:.*]] = llvm.insertvalue %[[T17]], %[[T9]][1] : !llvm.array<2 x vec<3 x f32>>
+//      CHECK: llvm.return %[[T18]] : !llvm.array<2 x vec<3 x f32>>
 
 func @shuffle_1D_direct(%arg0: vector<2xf32>, %arg1: vector<2xf32>) -> vector<2xf32> {
   %1 = vector.shuffle %arg0, %arg1 [0, 1] : vector<2xf32>, vector<2xf32>
   return %1 : vector<2xf32>
 }
 // CHECK-LABEL: llvm.func @shuffle_1D_direct(
-// CHECK-SAME: %[[A:.*]]: !llvm.vec<2 x float>,
-// CHECK-SAME: %[[B:.*]]: !llvm.vec<2 x float>)
-//       CHECK:   %[[s:.*]] = llvm.shufflevector %[[A]], %[[B]] [0, 1] : !llvm.vec<2 x float>, !llvm.vec<2 x float>
-//       CHECK:   llvm.return %[[s]] : !llvm.vec<2 x float>
+// CHECK-SAME: %[[A:.*]]: !llvm.vec<2 x f32>,
+// CHECK-SAME: %[[B:.*]]: !llvm.vec<2 x f32>)
+//       CHECK:   %[[s:.*]] = llvm.shufflevector %[[A]], %[[B]] [0, 1] : !llvm.vec<2 x f32>, !llvm.vec<2 x f32>
+//       CHECK:   llvm.return %[[s]] : !llvm.vec<2 x f32>
 
 func @shuffle_1D(%arg0: vector<2xf32>, %arg1: vector<3xf32>) -> vector<5xf32> {
   %1 = vector.shuffle %arg0, %arg1 [4, 3, 2, 1, 0] : vector<2xf32>, vector<3xf32>
   return %1 : vector<5xf32>
 }
 // CHECK-LABEL: llvm.func @shuffle_1D(
-// CHECK-SAME: %[[A:.*]]: !llvm.vec<2 x float>,
-// CHECK-SAME: %[[B:.*]]: !llvm.vec<3 x float>)
-//       CHECK:   %[[u0:.*]] = llvm.mlir.undef : !llvm.vec<5 x float>
+// CHECK-SAME: %[[A:.*]]: !llvm.vec<2 x f32>,
+// CHECK-SAME: %[[B:.*]]: !llvm.vec<3 x f32>)
+//       CHECK:   %[[u0:.*]] = llvm.mlir.undef : !llvm.vec<5 x f32>
 //       CHECK:   %[[c2:.*]] = llvm.mlir.constant(2 : index) : i64
-//       CHECK:   %[[e1:.*]] = llvm.extractelement %[[B]][%[[c2]] : i64] : !llvm.vec<3 x float>
+//       CHECK:   %[[e1:.*]] = llvm.extractelement %[[B]][%[[c2]] : i64] : !llvm.vec<3 x f32>
 //       CHECK:   %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
-//       CHECK:   %[[i1:.*]] = llvm.insertelement %[[e1]], %[[u0]][%[[c0]] : i64] : !llvm.vec<5 x float>
+//       CHECK:   %[[i1:.*]] = llvm.insertelement %[[e1]], %[[u0]][%[[c0]] : i64] : !llvm.vec<5 x f32>
 //       CHECK:   %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
-//       CHECK:   %[[e2:.*]] = llvm.extractelement %[[B]][%[[c1]] : i64] : !llvm.vec<3 x float>
+//       CHECK:   %[[e2:.*]] = llvm.extractelement %[[B]][%[[c1]] : i64] : !llvm.vec<3 x f32>
 //       CHECK:   %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
-//       CHECK:   %[[i2:.*]] = llvm.insertelement %[[e2]], %[[i1]][%[[c1]] : i64] : !llvm.vec<5 x float>
+//       CHECK:   %[[i2:.*]] = llvm.insertelement %[[e2]], %[[i1]][%[[c1]] : i64] : !llvm.vec<5 x f32>
 //       CHECK:   %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
-//       CHECK:   %[[e3:.*]] = llvm.extractelement %[[B]][%[[c0]] : i64] : !llvm.vec<3 x float>
+//       CHECK:   %[[e3:.*]] = llvm.extractelement %[[B]][%[[c0]] : i64] : !llvm.vec<3 x f32>
 //       CHECK:   %[[c2:.*]] = llvm.mlir.constant(2 : index) : i64
-//       CHECK:   %[[i3:.*]] = llvm.insertelement %[[e3]], %[[i2]][%[[c2]] : i64] : !llvm.vec<5 x float>
+//       CHECK:   %[[i3:.*]] = llvm.insertelement %[[e3]], %[[i2]][%[[c2]] : i64] : !llvm.vec<5 x f32>
 //       CHECK:   %[[c1:.*]] = llvm.mlir.constant(1 : index) : i64
-//       CHECK:   %[[e4:.*]] = llvm.extractelement %[[A]][%[[c1]] : i64] : !llvm.vec<2 x float>
+//       CHECK:   %[[e4:.*]] = llvm.extractelement %[[A]][%[[c1]] : i64] : !llvm.vec<2 x f32>
 //       CHECK:   %[[c3:.*]] = llvm.mlir.constant(3 : index) : i64
-//       CHECK:   %[[i4:.*]] = llvm.insertelement %[[e4]], %[[i3]][%[[c3]] : i64] : !llvm.vec<5 x float>
+//       CHECK:   %[[i4:.*]] = llvm.insertelement %[[e4]], %[[i3]][%[[c3]] : i64] : !llvm.vec<5 x f32>
 //       CHECK:   %[[c0:.*]] = llvm.mlir.constant(0 : index) : i64
-//       CHECK:   %[[e5:.*]] = llvm.extractelement %[[A]][%[[c0]] : i64] : !llvm.vec<2 x float>
+//       CHECK:   %[[e5:.*]] = llvm.extractelement %[[A]][%[[c0]] : i64] : !llvm.vec<2 x f32>
 //       CHECK:   %[[c4:.*]] = llvm.mlir.constant(4 : index) : i64
-//       CHECK:   %[[i5:.*]] = llvm.insertelement %[[e5]], %[[i4]][%[[c4]] : i64] : !llvm.vec<5 x float>
-//       CHECK:   llvm.return %[[i5]] : !llvm.vec<5 x float>
+//       CHECK:   %[[i5:.*]] = llvm.insertelement %[[e5]], %[[i4]][%[[c4]] : i64] : !llvm.vec<5 x f32>
+//       CHECK:   llvm.return %[[i5]] : !llvm.vec<5 x f32>
 
 func @shuffle_2D(%a: vector<1x4xf32>, %b: vector<2x4xf32>) -> vector<3x4xf32> {
   %1 = vector.shuffle %a, %b[1, 0, 2] : vector<1x4xf32>, vector<2x4xf32>
   return %1 : vector<3x4xf32>
 }
 // CHECK-LABEL: llvm.func @shuffle_2D(
-// CHECK-SAME: %[[A:.*]]: !llvm.array<1 x vec<4 x float>>,
-// CHECK-SAME: %[[B:.*]]: !llvm.array<2 x vec<4 x float>>)
-//       CHECK:   %[[u0:.*]] = llvm.mlir.undef : !llvm.array<3 x vec<4 x float>>
-//       CHECK:   %[[e1:.*]] = llvm.extractvalue %[[B]][0] : !llvm.array<2 x vec<4 x float>>
-//       CHECK:   %[[i1:.*]] = llvm.insertvalue %[[e1]], %[[u0]][0] : !llvm.array<3 x vec<4 x float>>
-//       CHECK:   %[[e2:.*]] = llvm.extractvalue %[[A]][0] : !llvm.array<1 x vec<4 x float>>
-//       CHECK:   %[[i2:.*]] = llvm.insertvalue %[[e2]], %[[i1]][1] : !llvm.array<3 x vec<4 x float>>
-//       CHECK:   %[[e3:.*]] = llvm.extractvalue %[[B]][1] : !llvm.array<2 x vec<4 x float>>
-//       CHECK:   %[[i3:.*]] = llvm.insertvalue %[[e3]], %[[i2]][2] : !llvm.array<3 x vec<4 x float>>
-//       CHECK:   llvm.return %[[i3]] : !llvm.array<3 x vec<4 x float>>
+// CHECK-SAME: %[[A:.*]]: !llvm.array<1 x vec<4 x f32>>,
+// CHECK-SAME: %[[B:.*]]: !llvm.array<2 x vec<4 x f32>>)
+//       CHECK:   %[[u0:.*]] = llvm.mlir.undef : !llvm.array<3 x vec<4 x f32>>
+//       CHECK:   %[[e1:.*]] = llvm.extractvalue %[[B]][0] : !llvm.array<2 x vec<4 x f32>>
+//       CHECK:   %[[i1:.*]] = llvm.insertvalue %[[e1]], %[[u0]][0] : !llvm.array<3 x vec<4 x f32>>
+//       CHECK:   %[[e2:.*]] = llvm.extractvalue %[[A]][0] : !llvm.array<1 x vec<4 x f32>>
+//       CHECK:   %[[i2:.*]] = llvm.insertvalue %[[e2]], %[[i1]][1] : !llvm.array<3 x vec<4 x f32>>
+//       CHECK:   %[[e3:.*]] = llvm.extractvalue %[[B]][1] : !llvm.array<2 x vec<4 x f32>>
+//       CHECK:   %[[i3:.*]] = llvm.insertvalue %[[e3]], %[[i2]][2] : !llvm.array<3 x vec<4 x f32>>
+//       CHECK:   llvm.return %[[i3]] : !llvm.array<3 x vec<4 x f32>>
 
 func @extract_element(%arg0: vector<16xf32>) -> f32 {
   %0 = constant 15 : i32
@@ -311,10 +311,10 @@ func @extract_element(%arg0: vector<16xf32>) -> f32 {
   return %1 : f32
 }
 // CHECK-LABEL: llvm.func @extract_element(
-// CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x float>)
+// CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x f32>)
 //       CHECK:   %[[c:.*]] = llvm.mlir.constant(15 : i32) : i32
-//       CHECK:   %[[x:.*]] = llvm.extractelement %[[A]][%[[c]] : i32] : !llvm.vec<16 x float>
-//       CHECK:   llvm.return %[[x]] : !llvm.float
+//       CHECK:   %[[x:.*]] = llvm.extractelement %[[A]][%[[c]] : i32] : !llvm.vec<16 x f32>
+//       CHECK:   llvm.return %[[x]] : f32
 
 func @extract_element_from_vec_1d(%arg0: vector<16xf32>) -> f32 {
   %0 = vector.extract %arg0[15]: vector<16xf32>
@@ -322,34 +322,34 @@ func @extract_element_from_vec_1d(%arg0: vector<16xf32>) -> f32 {
 }
 // CHECK-LABEL: llvm.func @extract_element_from_vec_1d
 //       CHECK:   llvm.mlir.constant(15 : i64) : i64
-//       CHECK:   llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<16 x float>
-//       CHECK:   llvm.return {{.*}} : !llvm.float
+//       CHECK:   llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<16 x f32>
+//       CHECK:   llvm.return {{.*}} : f32
 
 func @extract_vec_2d_from_vec_3d(%arg0: vector<4x3x16xf32>) -> vector<3x16xf32> {
   %0 = vector.extract %arg0[0]: vector<4x3x16xf32>
   return %0 : vector<3x16xf32>
 }
 // CHECK-LABEL: llvm.func @extract_vec_2d_from_vec_3d
-//       CHECK:   llvm.extractvalue {{.*}}[0] : !llvm.array<4 x array<3 x vec<16 x float>>>
-//       CHECK:   llvm.return {{.*}} : !llvm.array<3 x vec<16 x float>>
+//       CHECK:   llvm.extractvalue {{.*}}[0] : !llvm.array<4 x array<3 x vec<16 x f32>>>
+//       CHECK:   llvm.return {{.*}} : !llvm.array<3 x vec<16 x f32>>
 
 func @extract_vec_1d_from_vec_3d(%arg0: vector<4x3x16xf32>) -> vector<16xf32> {
   %0 = vector.extract %arg0[0, 0]: vector<4x3x16xf32>
   return %0 : vector<16xf32>
 }
 // CHECK-LABEL: llvm.func @extract_vec_1d_from_vec_3d
-//       CHECK:   llvm.extractvalue {{.*}}[0, 0] : !llvm.array<4 x array<3 x vec<16 x float>>>
-//       CHECK:   llvm.return {{.*}} : !llvm.vec<16 x float>
+//       CHECK:   llvm.extractvalue {{.*}}[0, 0] : !llvm.array<4 x array<3 x vec<16 x f32>>>
+//       CHECK:   llvm.return {{.*}} : !llvm.vec<16 x f32>
 
 func @extract_element_from_vec_3d(%arg0: vector<4x3x16xf32>) -> f32 {
   %0 = vector.extract %arg0[0, 0, 0]: vector<4x3x16xf32>
   return %0 : f32
 }
 // CHECK-LABEL: llvm.func @extract_element_from_vec_3d
-//       CHECK:   llvm.extractvalue {{.*}}[0, 0] : !llvm.array<4 x array<3 x vec<16 x float>>>
+//       CHECK:   llvm.extractvalue {{.*}}[0, 0] : !llvm.array<4 x array<3 x vec<16 x f32>>>
 //       CHECK:   llvm.mlir.constant(0 : i64) : i64
-//       CHECK:   llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<16 x float>
-//       CHECK:   llvm.return {{.*}} : !llvm.float
+//       CHECK:   llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<16 x f32>
+//       CHECK:   llvm.return {{.*}} : f32
 
 func @insert_element(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
   %0 = constant 3 : i32
@@ -357,11 +357,11 @@ func @insert_element(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
   return %1 : vector<4xf32>
 }
 // CHECK-LABEL: llvm.func @insert_element(
-// CHECK-SAME: %[[A:.*]]: !llvm.float,
-// CHECK-SAME: %[[B:.*]]: !llvm.vec<4 x float>)
+// CHECK-SAME: %[[A:.*]]: f32,
+// CHECK-SAME: %[[B:.*]]: !llvm.vec<4 x f32>)
 //       CHECK:   %[[c:.*]] = llvm.mlir.constant(3 : i32) : i32
-//       CHECK:   %[[x:.*]] = llvm.insertelement %[[A]], %[[B]][%[[c]] : i32] : !llvm.vec<4 x float>
-//       CHECK:   llvm.return %[[x]] : !llvm.vec<4 x float>
+//       CHECK:   %[[x:.*]] = llvm.insertelement %[[A]], %[[B]][%[[c]] : i32] : !llvm.vec<4 x f32>
+//       CHECK:   llvm.return %[[x]] : !llvm.vec<4 x f32>
 
 func @insert_element_into_vec_1d(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf32> {
   %0 = vector.insert %arg0, %arg1[3] : f32 into vector<4xf32>
@@ -369,65 +369,65 @@ func @insert_element_into_vec_1d(%arg0: f32, %arg1: vector<4xf32>) -> vector<4xf
 }
 // CHECK-LABEL: llvm.func @insert_element_into_vec_1d
 //       CHECK:   llvm.mlir.constant(3 : i64) : i64
-//       CHECK:   llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x float>
-//       CHECK:   llvm.return {{.*}} : !llvm.vec<4 x float>
+//       CHECK:   llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x f32>
+//       CHECK:   llvm.return {{.*}} : !llvm.vec<4 x f32>
 
 func @insert_vec_2d_into_vec_3d(%arg0: vector<8x16xf32>, %arg1: vector<4x8x16xf32>) -> vector<4x8x16xf32> {
   %0 = vector.insert %arg0, %arg1[3] : vector<8x16xf32> into vector<4x8x16xf32>
   return %0 : vector<4x8x16xf32>
 }
 // CHECK-LABEL: llvm.func @insert_vec_2d_into_vec_3d
-//       CHECK:   llvm.insertvalue {{.*}}, {{.*}}[3] : !llvm.array<4 x array<8 x vec<16 x float>>>
-//       CHECK:   llvm.return {{.*}} : !llvm.array<4 x array<8 x vec<16 x float>>>
+//       CHECK:   llvm.insertvalue {{.*}}, {{.*}}[3] : !llvm.array<4 x array<8 x vec<16 x f32>>>
+//       CHECK:   llvm.return {{.*}} : !llvm.array<4 x array<8 x vec<16 x f32>>>
 
 func @insert_vec_1d_into_vec_3d(%arg0: vector<16xf32>, %arg1: vector<4x8x16xf32>) -> vector<4x8x16xf32> {
   %0 = vector.insert %arg0, %arg1[3, 7] : vector<16xf32> into vector<4x8x16xf32>
   return %0 : vector<4x8x16xf32>
 }
 // CHECK-LABEL: llvm.func @insert_vec_1d_into_vec_3d
-//       CHECK:   llvm.insertvalue {{.*}}, {{.*}}[3, 7] : !llvm.array<4 x array<8 x vec<16 x float>>>
-//       CHECK:   llvm.return {{.*}} : !llvm.array<4 x array<8 x vec<16 x float>>>
+//       CHECK:   llvm.insertvalue {{.*}}, {{.*}}[3, 7] : !llvm.array<4 x array<8 x vec<16 x f32>>>
+//       CHECK:   llvm.return {{.*}} : !llvm.array<4 x array<8 x vec<16 x f32>>>
 
 func @insert_element_into_vec_3d(%arg0: f32, %arg1: vector<4x8x16xf32>) -> vector<4x8x16xf32> {
   %0 = vector.insert %arg0, %arg1[3, 7, 15] : f32 into vector<4x8x16xf32>
   return %0 : vector<4x8x16xf32>
 }
 // CHECK-LABEL: llvm.func @insert_element_into_vec_3d
-//       CHECK:   llvm.extractvalue {{.*}}[3, 7] : !llvm.array<4 x array<8 x vec<16 x float>>>
+//       CHECK:   llvm.extractvalue {{.*}}[3, 7] : !llvm.array<4 x array<8 x vec<16 x f32>>>
 //       CHECK:   llvm.mlir.constant(15 : i64) : i64
-//       CHECK:   llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<16 x float>
-//       CHECK:   llvm.insertvalue {{.*}}, {{.*}}[3, 7] : !llvm.array<4 x array<8 x vec<16 x float>>>
-//       CHECK:   llvm.return {{.*}} : !llvm.array<4 x array<8 x vec<16 x float>>>
+//       CHECK:   llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<16 x f32>
+//       CHECK:   llvm.insertvalue {{.*}}, {{.*}}[3, 7] : !llvm.array<4 x array<8 x vec<16 x f32>>>
+//       CHECK:   llvm.return {{.*}} : !llvm.array<4 x array<8 x vec<16 x f32>>>
 
 func @vector_type_cast(%arg0: memref<8x8x8xf32>) -> memref<vector<8x8x8xf32>> {
   %0 = vector.type_cast %arg0: memref<8x8x8xf32> to memref<vector<8x8x8xf32>>
   return %0 : memref<vector<8x8x8xf32>>
 }
 // CHECK-LABEL: llvm.func @vector_type_cast
-//       CHECK:   llvm.mlir.undef : !llvm.struct<(ptr<array<8 x array<8 x vec<8 x float>>>>, ptr<array<8 x array<8 x vec<8 x float>>>>, i64)>
-//       CHECK:   %[[allocated:.*]] = llvm.extractvalue {{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:   %[[allocatedBit:.*]] = llvm.bitcast %[[allocated]] : !llvm.ptr<float> to !llvm.ptr<array<8 x array<8 x vec<8 x float>>>>
-//       CHECK:   llvm.insertvalue %[[allocatedBit]], {{.*}}[0] : !llvm.struct<(ptr<array<8 x array<8 x vec<8 x float>>>>, ptr<array<8 x array<8 x vec<8 x float>>>>, i64)>
-//       CHECK:   %[[aligned:.*]] = llvm.extractvalue {{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:   %[[alignedBit:.*]] = llvm.bitcast %[[aligned]] : !llvm.ptr<float> to !llvm.ptr<array<8 x array<8 x vec<8 x float>>>>
-//       CHECK:   llvm.insertvalue %[[alignedBit]], {{.*}}[1] : !llvm.struct<(ptr<array<8 x array<8 x vec<8 x float>>>>, ptr<array<8 x array<8 x vec<8 x float>>>>, i64)>
+//       CHECK:   llvm.mlir.undef : !llvm.struct<(ptr<array<8 x array<8 x vec<8 x f32>>>>, ptr<array<8 x array<8 x vec<8 x f32>>>>, i64)>
+//       CHECK:   %[[allocated:.*]] = llvm.extractvalue {{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:   %[[allocatedBit:.*]] = llvm.bitcast %[[allocated]] : !llvm.ptr<f32> to !llvm.ptr<array<8 x array<8 x vec<8 x f32>>>>
+//       CHECK:   llvm.insertvalue %[[allocatedBit]], {{.*}}[0] : !llvm.struct<(ptr<array<8 x array<8 x vec<8 x f32>>>>, ptr<array<8 x array<8 x vec<8 x f32>>>>, i64)>
+//       CHECK:   %[[aligned:.*]] = llvm.extractvalue {{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:   %[[alignedBit:.*]] = llvm.bitcast %[[aligned]] : !llvm.ptr<f32> to !llvm.ptr<array<8 x array<8 x vec<8 x f32>>>>
+//       CHECK:   llvm.insertvalue %[[alignedBit]], {{.*}}[1] : !llvm.struct<(ptr<array<8 x array<8 x vec<8 x f32>>>>, ptr<array<8 x array<8 x vec<8 x f32>>>>, i64)>
 //       CHECK:   llvm.mlir.constant(0 : index
-//       CHECK:   llvm.insertvalue {{.*}}[2] : !llvm.struct<(ptr<array<8 x array<8 x vec<8 x float>>>>, ptr<array<8 x array<8 x vec<8 x float>>>>, i64)>
+//       CHECK:   llvm.insertvalue {{.*}}[2] : !llvm.struct<(ptr<array<8 x array<8 x vec<8 x f32>>>>, ptr<array<8 x array<8 x vec<8 x f32>>>>, i64)>
 
 func @vector_type_cast_non_zero_addrspace(%arg0: memref<8x8x8xf32, 3>) -> memref<vector<8x8x8xf32>, 3> {
   %0 = vector.type_cast %arg0: memref<8x8x8xf32, 3> to memref<vector<8x8x8xf32>, 3>
   return %0 : memref<vector<8x8x8xf32>, 3>
 }
 // CHECK-LABEL: llvm.func @vector_type_cast_non_zero_addrspace
-//       CHECK:   llvm.mlir.undef : !llvm.struct<(ptr<array<8 x array<8 x vec<8 x float>>>, 3>, ptr<array<8 x array<8 x vec<8 x float>>>, 3>, i64)>
-//       CHECK:   %[[allocated:.*]] = llvm.extractvalue {{.*}}[0] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:   %[[allocatedBit:.*]] = llvm.bitcast %[[allocated]] : !llvm.ptr<float, 3> to !llvm.ptr<array<8 x array<8 x vec<8 x float>>>, 3>
-//       CHECK:   llvm.insertvalue %[[allocatedBit]], {{.*}}[0] : !llvm.struct<(ptr<array<8 x array<8 x vec<8 x float>>>, 3>, ptr<array<8 x array<8 x vec<8 x float>>>, 3>, i64)>
-//       CHECK:   %[[aligned:.*]] = llvm.extractvalue {{.*}}[1] : !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:   %[[alignedBit:.*]] = llvm.bitcast %[[aligned]] : !llvm.ptr<float, 3> to !llvm.ptr<array<8 x array<8 x vec<8 x float>>>, 3>
-//       CHECK:   llvm.insertvalue %[[alignedBit]], {{.*}}[1] : !llvm.struct<(ptr<array<8 x array<8 x vec<8 x float>>>, 3>, ptr<array<8 x array<8 x vec<8 x float>>>, 3>, i64)>
+//       CHECK:   llvm.mlir.undef : !llvm.struct<(ptr<array<8 x array<8 x vec<8 x f32>>>, 3>, ptr<array<8 x array<8 x vec<8 x f32>>>, 3>, i64)>
+//       CHECK:   %[[allocated:.*]] = llvm.extractvalue {{.*}}[0] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:   %[[allocatedBit:.*]] = llvm.bitcast %[[allocated]] : !llvm.ptr<f32, 3> to !llvm.ptr<array<8 x array<8 x vec<8 x f32>>>, 3>
+//       CHECK:   llvm.insertvalue %[[allocatedBit]], {{.*}}[0] : !llvm.struct<(ptr<array<8 x array<8 x vec<8 x f32>>>, 3>, ptr<array<8 x array<8 x vec<8 x f32>>>, 3>, i64)>
+//       CHECK:   %[[aligned:.*]] = llvm.extractvalue {{.*}}[1] : !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:   %[[alignedBit:.*]] = llvm.bitcast %[[aligned]] : !llvm.ptr<f32, 3> to !llvm.ptr<array<8 x array<8 x vec<8 x f32>>>, 3>
+//       CHECK:   llvm.insertvalue %[[alignedBit]], {{.*}}[1] : !llvm.struct<(ptr<array<8 x array<8 x vec<8 x f32>>>, 3>, ptr<array<8 x array<8 x vec<8 x f32>>>, 3>, i64)>
 //       CHECK:   llvm.mlir.constant(0 : index
-//       CHECK:   llvm.insertvalue {{.*}}[2] : !llvm.struct<(ptr<array<8 x array<8 x vec<8 x float>>>, 3>, ptr<array<8 x array<8 x vec<8 x float>>>, 3>, i64)>
+//       CHECK:   llvm.insertvalue {{.*}}[2] : !llvm.struct<(ptr<array<8 x array<8 x vec<8 x f32>>>, 3>, ptr<array<8 x array<8 x vec<8 x f32>>>, 3>, i64)>
 
 func @vector_print_scalar_i1(%arg0: i1) {
   vector.print %arg0 : i1
@@ -553,8 +553,8 @@ func @vector_print_scalar_f32(%arg0: f32) {
   return
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_f32(
-// CHECK-SAME: %[[A:.*]]: !llvm.float)
-//       CHECK:    llvm.call @printF32(%[[A]]) : (!llvm.float) -> ()
+// CHECK-SAME: %[[A:.*]]: f32)
+//       CHECK:    llvm.call @printF32(%[[A]]) : (f32) -> ()
 //       CHECK:    llvm.call @printNewline() : () -> ()
 
 func @vector_print_scalar_f64(%arg0: f64) {
@@ -562,8 +562,8 @@ func @vector_print_scalar_f64(%arg0: f64) {
   return
 }
 // CHECK-LABEL: llvm.func @vector_print_scalar_f64(
-// CHECK-SAME: %[[A:.*]]: !llvm.double)
-//       CHECK:    llvm.call @printF64(%[[A]]) : (!llvm.double) -> ()
+// CHECK-SAME: %[[A:.*]]: f64)
+//       CHECK:    llvm.call @printF64(%[[A]]) : (f64) -> ()
 //       CHECK:    llvm.call @printNewline() : () -> ()
 
 func @vector_print_vector(%arg0: vector<2x2xf32>) {
@@ -571,28 +571,28 @@ func @vector_print_vector(%arg0: vector<2x2xf32>) {
   return
 }
 // CHECK-LABEL: llvm.func @vector_print_vector(
-// CHECK-SAME: %[[A:.*]]: !llvm.array<2 x vec<2 x float>>)
+// CHECK-SAME: %[[A:.*]]: !llvm.array<2 x vec<2 x f32>>)
 //       CHECK:    llvm.call @printOpen() : () -> ()
-//       CHECK:    %[[x0:.*]] = llvm.extractvalue %[[A]][0] : !llvm.array<2 x vec<2 x float>>
+//       CHECK:    %[[x0:.*]] = llvm.extractvalue %[[A]][0] : !llvm.array<2 x vec<2 x f32>>
 //       CHECK:    llvm.call @printOpen() : () -> ()
 //       CHECK:    %[[x1:.*]] = llvm.mlir.constant(0 : index) : i64
-//       CHECK:    %[[x2:.*]] = llvm.extractelement %[[x0]][%[[x1]] : i64] : !llvm.vec<2 x float>
-//       CHECK:    llvm.call @printF32(%[[x2]]) : (!llvm.float) -> ()
+//       CHECK:    %[[x2:.*]] = llvm.extractelement %[[x0]][%[[x1]] : i64] : !llvm.vec<2 x f32>
+//       CHECK:    llvm.call @printF32(%[[x2]]) : (f32) -> ()
 //       CHECK:    llvm.call @printComma() : () -> ()
 //       CHECK:    %[[x3:.*]] = llvm.mlir.constant(1 : index) : i64
-//       CHECK:    %[[x4:.*]] = llvm.extractelement %[[x0]][%[[x3]] : i64] : !llvm.vec<2 x float>
-//       CHECK:    llvm.call @printF32(%[[x4]]) : (!llvm.float) -> ()
+//       CHECK:    %[[x4:.*]] = llvm.extractelement %[[x0]][%[[x3]] : i64] : !llvm.vec<2 x f32>
+//       CHECK:    llvm.call @printF32(%[[x4]]) : (f32) -> ()
 //       CHECK:    llvm.call @printClose() : () -> ()
 //       CHECK:    llvm.call @printComma() : () -> ()
-//       CHECK:    %[[x5:.*]] = llvm.extractvalue %[[A]][1] : !llvm.array<2 x vec<2 x float>>
+//       CHECK:    %[[x5:.*]] = llvm.extractvalue %[[A]][1] : !llvm.array<2 x vec<2 x f32>>
 //       CHECK:    llvm.call @printOpen() : () -> ()
 //       CHECK:    %[[x6:.*]] = llvm.mlir.constant(0 : index) : i64
-//       CHECK:    %[[x7:.*]] = llvm.extractelement %[[x5]][%[[x6]] : i64] : !llvm.vec<2 x float>
-//       CHECK:    llvm.call @printF32(%[[x7]]) : (!llvm.float) -> ()
+//       CHECK:    %[[x7:.*]] = llvm.extractelement %[[x5]][%[[x6]] : i64] : !llvm.vec<2 x f32>
+//       CHECK:    llvm.call @printF32(%[[x7]]) : (f32) -> ()
 //       CHECK:    llvm.call @printComma() : () -> ()
 //       CHECK:    %[[x8:.*]] = llvm.mlir.constant(1 : index) : i64
-//       CHECK:    %[[x9:.*]] = llvm.extractelement %[[x5]][%[[x8]] : i64] : !llvm.vec<2 x float>
-//       CHECK:    llvm.call @printF32(%[[x9]]) : (!llvm.float) -> ()
+//       CHECK:    %[[x9:.*]] = llvm.extractelement %[[x5]][%[[x8]] : i64] : !llvm.vec<2 x f32>
+//       CHECK:    llvm.call @printF32(%[[x9]]) : (f32) -> ()
 //       CHECK:    llvm.call @printClose() : () -> ()
 //       CHECK:    llvm.call @printClose() : () -> ()
 //       CHECK:    llvm.call @printNewline() : () -> ()
@@ -602,45 +602,45 @@ func @extract_strided_slice1(%arg0: vector<4xf32>) -> vector<2xf32> {
   return %0 : vector<2xf32>
 }
 // CHECK-LABEL: llvm.func @extract_strided_slice1(
-//  CHECK-SAME:    %[[A:.*]]: !llvm.vec<4 x float>)
-//       CHECK:    %[[T0:.*]] = llvm.shufflevector %[[A]], %[[A]] [2, 3] : !llvm.vec<4 x float>, !llvm.vec<4 x float>
-//       CHECK:    llvm.return %[[T0]] : !llvm.vec<2 x float>
+//  CHECK-SAME:    %[[A:.*]]: !llvm.vec<4 x f32>)
+//       CHECK:    %[[T0:.*]] = llvm.shufflevector %[[A]], %[[A]] [2, 3] : !llvm.vec<4 x f32>, !llvm.vec<4 x f32>
+//       CHECK:    llvm.return %[[T0]] : !llvm.vec<2 x f32>
 
 func @extract_strided_slice2(%arg0: vector<4x8xf32>) -> vector<2x8xf32> {
   %0 = vector.extract_strided_slice %arg0 {offsets = [2], sizes = [2], strides = [1]} : vector<4x8xf32> to vector<2x8xf32>
   return %0 : vector<2x8xf32>
 }
 // CHECK-LABEL: llvm.func @extract_strided_slice2(
-//  CHECK-SAME:    %[[A:.*]]: !llvm.array<4 x vec<8 x float>>)
-//       CHECK:    %[[T0:.*]] = llvm.mlir.undef : !llvm.array<2 x vec<8 x float>>
-//       CHECK:    %[[T1:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<4 x vec<8 x float>>
-//       CHECK:    %[[T2:.*]] = llvm.insertvalue %[[T1]], %[[T0]][0] : !llvm.array<2 x vec<8 x float>>
-//       CHECK:    %[[T3:.*]] = llvm.extractvalue %[[A]][3] : !llvm.array<4 x vec<8 x float>>
-//       CHECK:    %[[T4:.*]] = llvm.insertvalue %[[T3]], %[[T2]][1] : !llvm.array<2 x vec<8 x float>>
-//       CHECK:    llvm.return %[[T4]] : !llvm.array<2 x vec<8 x float>>
+//  CHECK-SAME:    %[[A:.*]]: !llvm.array<4 x vec<8 x f32>>)
+//       CHECK:    %[[T0:.*]] = llvm.mlir.undef : !llvm.array<2 x vec<8 x f32>>
+//       CHECK:    %[[T1:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<4 x vec<8 x f32>>
+//       CHECK:    %[[T2:.*]] = llvm.insertvalue %[[T1]], %[[T0]][0] : !llvm.array<2 x vec<8 x f32>>
+//       CHECK:    %[[T3:.*]] = llvm.extractvalue %[[A]][3] : !llvm.array<4 x vec<8 x f32>>
+//       CHECK:    %[[T4:.*]] = llvm.insertvalue %[[T3]], %[[T2]][1] : !llvm.array<2 x vec<8 x f32>>
+//       CHECK:    llvm.return %[[T4]] : !llvm.array<2 x vec<8 x f32>>
 
 func @extract_strided_slice3(%arg0: vector<4x8xf32>) -> vector<2x2xf32> {
   %0 = vector.extract_strided_slice %arg0 {offsets = [2, 2], sizes = [2, 2], strides = [1, 1]} : vector<4x8xf32> to vector<2x2xf32>
   return %0 : vector<2x2xf32>
 }
 // CHECK-LABEL: llvm.func @extract_strided_slice3(
-//  CHECK-SAME:    %[[A:.*]]: !llvm.array<4 x vec<8 x float>>)
-//       CHECK:    %[[T1:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<2x2xf32>) : !llvm.array<2 x vec<2 x float>>
-//       CHECK:    %[[T2:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<4 x vec<8 x float>>
-//       CHECK:    %[[T3:.*]] = llvm.shufflevector %[[T2]], %[[T2]] [2, 3] : !llvm.vec<8 x float>, !llvm.vec<8 x float>
-//       CHECK:    %[[T4:.*]] = llvm.insertvalue %[[T3]], %[[T1]][0] : !llvm.array<2 x vec<2 x float>>
-//       CHECK:    %[[T5:.*]] = llvm.extractvalue %[[A]][3] : !llvm.array<4 x vec<8 x float>>
-//       CHECK:    %[[T6:.*]] = llvm.shufflevector %[[T5]], %[[T5]] [2, 3] : !llvm.vec<8 x float>, !llvm.vec<8 x float>
-//       CHECK:    %[[T7:.*]] = llvm.insertvalue %[[T6]], %[[T4]][1] : !llvm.array<2 x vec<2 x float>>
-//       CHECK:    llvm.return %[[T7]] : !llvm.array<2 x vec<2 x float>>
+//  CHECK-SAME:    %[[A:.*]]: !llvm.array<4 x vec<8 x f32>>)
+//       CHECK:    %[[T1:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<2x2xf32>) : !llvm.array<2 x vec<2 x f32>>
+//       CHECK:    %[[T2:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<4 x vec<8 x f32>>
+//       CHECK:    %[[T3:.*]] = llvm.shufflevector %[[T2]], %[[T2]] [2, 3] : !llvm.vec<8 x f32>, !llvm.vec<8 x f32>
+//       CHECK:    %[[T4:.*]] = llvm.insertvalue %[[T3]], %[[T1]][0] : !llvm.array<2 x vec<2 x f32>>
+//       CHECK:    %[[T5:.*]] = llvm.extractvalue %[[A]][3] : !llvm.array<4 x vec<8 x f32>>
+//       CHECK:    %[[T6:.*]] = llvm.shufflevector %[[T5]], %[[T5]] [2, 3] : !llvm.vec<8 x f32>, !llvm.vec<8 x f32>
+//       CHECK:    %[[T7:.*]] = llvm.insertvalue %[[T6]], %[[T4]][1] : !llvm.array<2 x vec<2 x f32>>
+//       CHECK:    llvm.return %[[T7]] : !llvm.array<2 x vec<2 x f32>>
 
 func @insert_strided_slice1(%b: vector<4x4xf32>, %c: vector<4x4x4xf32>) -> vector<4x4x4xf32> {
   %0 = vector.insert_strided_slice %b, %c {offsets = [2, 0, 0], strides = [1, 1]} : vector<4x4xf32> into vector<4x4x4xf32>
   return %0 : vector<4x4x4xf32>
 }
 // CHECK-LABEL: llvm.func @insert_strided_slice1
-//       CHECK:    llvm.extractvalue {{.*}}[2] : !llvm.array<4 x array<4 x vec<4 x float>>>
-//  CHECK-NEXT:    llvm.insertvalue {{.*}}, {{.*}}[2] : !llvm.array<4 x array<4 x vec<4 x float>>>
+//       CHECK:    llvm.extractvalue {{.*}}[2] : !llvm.array<4 x array<4 x vec<4 x f32>>>
+//  CHECK-NEXT:    llvm.insertvalue {{.*}}, {{.*}}[2] : !llvm.array<4 x array<4 x vec<4 x f32>>>
 
 func @insert_strided_slice2(%a: vector<2x2xf32>, %b: vector<4x4xf32>) -> vector<4x4xf32> {
   %0 = vector.insert_strided_slice %a, %b {offsets = [2, 2], strides = [1, 1]} : vector<2x2xf32> into vector<4x4xf32>
@@ -649,34 +649,34 @@ func @insert_strided_slice2(%a: vector<2x2xf32>, %b: vector<4x4xf32>) -> vector<
 // CHECK-LABEL: llvm.func @insert_strided_slice2
 //
 // Subvector vector<2xf32> @0 into vector<4xf32> @2
-//       CHECK:    llvm.extractvalue {{.*}}[0] : !llvm.array<2 x vec<2 x float>>
-//  CHECK-NEXT:    llvm.extractvalue {{.*}}[2] : !llvm.array<4 x vec<4 x float>>
+//       CHECK:    llvm.extractvalue {{.*}}[0] : !llvm.array<2 x vec<2 x f32>>
+//  CHECK-NEXT:    llvm.extractvalue {{.*}}[2] : !llvm.array<4 x vec<4 x f32>>
 // Element @0 -> element @2
 //  CHECK-NEXT:    llvm.mlir.constant(0 : index) : i64
-//  CHECK-NEXT:    llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<2 x float>
+//  CHECK-NEXT:    llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<2 x f32>
 //  CHECK-NEXT:    llvm.mlir.constant(2 : index) : i64
-//  CHECK-NEXT:    llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x float>
+//  CHECK-NEXT:    llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x f32>
 // Element @1 -> element @3
 //  CHECK-NEXT:    llvm.mlir.constant(1 : index) : i64
-//  CHECK-NEXT:    llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<2 x float>
+//  CHECK-NEXT:    llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<2 x f32>
 //  CHECK-NEXT:    llvm.mlir.constant(3 : index) : i64
-//  CHECK-NEXT:    llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x float>
-//  CHECK-NEXT:    llvm.insertvalue {{.*}}, {{.*}}[2] : !llvm.array<4 x vec<4 x float>>
+//  CHECK-NEXT:    llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x f32>
+//  CHECK-NEXT:    llvm.insertvalue {{.*}}, {{.*}}[2] : !llvm.array<4 x vec<4 x f32>>
 //
 // Subvector vector<2xf32> @1 into vector<4xf32> @3
-//       CHECK:    llvm.extractvalue {{.*}}[1] : !llvm.array<2 x vec<2 x float>>
-//  CHECK-NEXT:    llvm.extractvalue {{.*}}[3] : !llvm.array<4 x vec<4 x float>>
+//       CHECK:    llvm.extractvalue {{.*}}[1] : !llvm.array<2 x vec<2 x f32>>
+//  CHECK-NEXT:    llvm.extractvalue {{.*}}[3] : !llvm.array<4 x vec<4 x f32>>
 // Element @0 -> element @2
 //  CHECK-NEXT:    llvm.mlir.constant(0 : index) : i64
-//  CHECK-NEXT:    llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<2 x float>
+//  CHECK-NEXT:    llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<2 x f32>
 //  CHECK-NEXT:    llvm.mlir.constant(2 : index) : i64
-//  CHECK-NEXT:    llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x float>
+//  CHECK-NEXT:    llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x f32>
 // Element @1 -> element @3
 //  CHECK-NEXT:    llvm.mlir.constant(1 : index) : i64
-//  CHECK-NEXT:    llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<2 x float>
+//  CHECK-NEXT:    llvm.extractelement {{.*}}[{{.*}} : i64] : !llvm.vec<2 x f32>
 //  CHECK-NEXT:    llvm.mlir.constant(3 : index) : i64
-//  CHECK-NEXT:    llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x float>
-//  CHECK-NEXT:    llvm.insertvalue {{.*}}, {{.*}}[3] : !llvm.array<4 x vec<4 x float>>
+//  CHECK-NEXT:    llvm.insertelement {{.*}}, {{.*}}[{{.*}} : i64] : !llvm.vec<4 x f32>
+//  CHECK-NEXT:    llvm.insertvalue {{.*}}, {{.*}}[3] : !llvm.array<4 x vec<4 x f32>>
 
 func @insert_strided_slice3(%arg0: vector<2x4xf32>, %arg1: vector<16x4x8xf32>) -> vector<16x4x8xf32> {
   %0 = vector.insert_strided_slice %arg0, %arg1 {offsets = [0, 0, 2], strides = [1, 1]}:
@@ -684,49 +684,49 @@ func @insert_strided_slice3(%arg0: vector<2x4xf32>, %arg1: vector<16x4x8xf32>) -
   return %0 : vector<16x4x8xf32>
 }
 // CHECK-LABEL: llvm.func @insert_strided_slice3(
-// CHECK-SAME: %[[A:.*]]: !llvm.array<2 x vec<4 x float>>,
-// CHECK-SAME: %[[B:.*]]: !llvm.array<16 x array<4 x vec<8 x float>>>)
-//      CHECK: %[[s0:.*]] = llvm.extractvalue %[[B]][0] : !llvm.array<16 x array<4 x vec<8 x float>>>
-//      CHECK: %[[s1:.*]] = llvm.extractvalue %[[A]][0] : !llvm.array<2 x vec<4 x float>>
-//      CHECK: %[[s2:.*]] = llvm.extractvalue %[[B]][0, 0] : !llvm.array<16 x array<4 x vec<8 x float>>>
+// CHECK-SAME: %[[A:.*]]: !llvm.array<2 x vec<4 x f32>>,
+// CHECK-SAME: %[[B:.*]]: !llvm.array<16 x array<4 x vec<8 x f32>>>)
+//      CHECK: %[[s0:.*]] = llvm.extractvalue %[[B]][0] : !llvm.array<16 x array<4 x vec<8 x f32>>>
+//      CHECK: %[[s1:.*]] = llvm.extractvalue %[[A]][0] : !llvm.array<2 x vec<4 x f32>>
+//      CHECK: %[[s2:.*]] = llvm.extractvalue %[[B]][0, 0] : !llvm.array<16 x array<4 x vec<8 x f32>>>
 //      CHECK: %[[s3:.*]] = llvm.mlir.constant(0 : index) : i64
-//      CHECK: %[[s4:.*]] = llvm.extractelement %[[s1]][%[[s3]] : i64] : !llvm.vec<4 x float>
+//      CHECK: %[[s4:.*]] = llvm.extractelement %[[s1]][%[[s3]] : i64] : !llvm.vec<4 x f32>
 //      CHECK: %[[s5:.*]] = llvm.mlir.constant(2 : index) : i64
-//      CHECK: %[[s6:.*]] = llvm.insertelement %[[s4]], %[[s2]][%[[s5]] : i64] : !llvm.vec<8 x float>
+//      CHECK: %[[s6:.*]] = llvm.insertelement %[[s4]], %[[s2]][%[[s5]] : i64] : !llvm.vec<8 x f32>
 //      CHECK: %[[s7:.*]] = llvm.mlir.constant(1 : index) : i64
-//      CHECK: %[[s8:.*]] = llvm.extractelement %[[s1]][%[[s7]] : i64] : !llvm.vec<4 x float>
+//      CHECK: %[[s8:.*]] = llvm.extractelement %[[s1]][%[[s7]] : i64] : !llvm.vec<4 x f32>
 //      CHECK: %[[s9:.*]] = llvm.mlir.constant(3 : index) : i64
-//      CHECK: %[[s10:.*]] = llvm.insertelement %[[s8]], %[[s6]][%[[s9]] : i64] : !llvm.vec<8 x float>
+//      CHECK: %[[s10:.*]] = llvm.insertelement %[[s8]], %[[s6]][%[[s9]] : i64] : !llvm.vec<8 x f32>
 //      CHECK: %[[s11:.*]] = llvm.mlir.constant(2 : index) : i64
-//      CHECK: %[[s12:.*]] = llvm.extractelement %[[s1]][%[[s11]] : i64] : !llvm.vec<4 x float>
+//      CHECK: %[[s12:.*]] = llvm.extractelement %[[s1]][%[[s11]] : i64] : !llvm.vec<4 x f32>
 //      CHECK: %[[s13:.*]] = llvm.mlir.constant(4 : index) : i64
-//      CHECK: %[[s14:.*]] = llvm.insertelement %[[s12]], %[[s10]][%[[s13]] : i64] : !llvm.vec<8 x float>
+//      CHECK: %[[s14:.*]] = llvm.insertelement %[[s12]], %[[s10]][%[[s13]] : i64] : !llvm.vec<8 x f32>
 //      CHECK: %[[s15:.*]] = llvm.mlir.constant(3 : index) : i64
-//      CHECK: %[[s16:.*]] = llvm.extractelement %[[s1]][%[[s15]] : i64] : !llvm.vec<4 x float>
+//      CHECK: %[[s16:.*]] = llvm.extractelement %[[s1]][%[[s15]] : i64] : !llvm.vec<4 x f32>
 //      CHECK: %[[s17:.*]] = llvm.mlir.constant(5 : index) : i64
-//      CHECK: %[[s18:.*]] = llvm.insertelement %[[s16]], %[[s14]][%[[s17]] : i64] : !llvm.vec<8 x float>
-//      CHECK: %[[s19:.*]] = llvm.insertvalue %[[s18]], %[[s0]][0] : !llvm.array<4 x vec<8 x float>>
-//      CHECK: %[[s20:.*]] = llvm.extractvalue %[[A]][1] : !llvm.array<2 x vec<4 x float>>
-//      CHECK: %[[s21:.*]] = llvm.extractvalue %[[B]][0, 1] : !llvm.array<16 x array<4 x vec<8 x float>>>
+//      CHECK: %[[s18:.*]] = llvm.insertelement %[[s16]], %[[s14]][%[[s17]] : i64] : !llvm.vec<8 x f32>
+//      CHECK: %[[s19:.*]] = llvm.insertvalue %[[s18]], %[[s0]][0] : !llvm.array<4 x vec<8 x f32>>
+//      CHECK: %[[s20:.*]] = llvm.extractvalue %[[A]][1] : !llvm.array<2 x vec<4 x f32>>
+//      CHECK: %[[s21:.*]] = llvm.extractvalue %[[B]][0, 1] : !llvm.array<16 x array<4 x vec<8 x f32>>>
 //      CHECK: %[[s22:.*]] = llvm.mlir.constant(0 : index) : i64
-//      CHECK: %[[s23:.*]] = llvm.extractelement %[[s20]][%[[s22]] : i64] : !llvm.vec<4 x float>
+//      CHECK: %[[s23:.*]] = llvm.extractelement %[[s20]][%[[s22]] : i64] : !llvm.vec<4 x f32>
 //      CHECK: %[[s24:.*]] = llvm.mlir.constant(2 : index) : i64
-//      CHECK: %[[s25:.*]] = llvm.insertelement %[[s23]], %[[s21]][%[[s24]] : i64] : !llvm.vec<8 x float>
+//      CHECK: %[[s25:.*]] = llvm.insertelement %[[s23]], %[[s21]][%[[s24]] : i64] : !llvm.vec<8 x f32>
 //      CHECK: %[[s26:.*]] = llvm.mlir.constant(1 : index) : i64
-//      CHECK: %[[s27:.*]] = llvm.extractelement %[[s20]][%[[s26]] : i64] : !llvm.vec<4 x float>
+//      CHECK: %[[s27:.*]] = llvm.extractelement %[[s20]][%[[s26]] : i64] : !llvm.vec<4 x f32>
 //      CHECK: %[[s28:.*]] = llvm.mlir.constant(3 : index) : i64
-//      CHECK: %[[s29:.*]] = llvm.insertelement %[[s27]], %[[s25]][%[[s28]] : i64] : !llvm.vec<8 x float>
+//      CHECK: %[[s29:.*]] = llvm.insertelement %[[s27]], %[[s25]][%[[s28]] : i64] : !llvm.vec<8 x f32>
 //      CHECK: %[[s30:.*]] = llvm.mlir.constant(2 : index) : i64
-//      CHECK: %[[s31:.*]] = llvm.extractelement %[[s20]][%[[s30]] : i64] : !llvm.vec<4 x float>
+//      CHECK: %[[s31:.*]] = llvm.extractelement %[[s20]][%[[s30]] : i64] : !llvm.vec<4 x f32>
 //      CHECK: %[[s32:.*]] = llvm.mlir.constant(4 : index) : i64
-//      CHECK: %[[s33:.*]] = llvm.insertelement %[[s31]], %[[s29]][%[[s32]] : i64] : !llvm.vec<8 x float>
+//      CHECK: %[[s33:.*]] = llvm.insertelement %[[s31]], %[[s29]][%[[s32]] : i64] : !llvm.vec<8 x f32>
 //      CHECK: %[[s34:.*]] = llvm.mlir.constant(3 : index) : i64
-//      CHECK: %[[s35:.*]] = llvm.extractelement %[[s20]][%[[s34]] : i64] : !llvm.vec<4 x float>
+//      CHECK: %[[s35:.*]] = llvm.extractelement %[[s20]][%[[s34]] : i64] : !llvm.vec<4 x f32>
 //      CHECK: %[[s36:.*]] = llvm.mlir.constant(5 : index) : i64
-//      CHECK: %[[s37:.*]] = llvm.insertelement %[[s35]], %[[s33]][%[[s36]] : i64] : !llvm.vec<8 x float>
-//      CHECK: %[[s38:.*]] = llvm.insertvalue %[[s37]], %[[s19]][1] : !llvm.array<4 x vec<8 x float>>
-//      CHECK: %[[s39:.*]] = llvm.insertvalue %[[s38]], %[[B]][0] : !llvm.array<16 x array<4 x vec<8 x float>>>
-//      CHECK: llvm.return %[[s39]] : !llvm.array<16 x array<4 x vec<8 x float>>>
+//      CHECK: %[[s37:.*]] = llvm.insertelement %[[s35]], %[[s33]][%[[s36]] : i64] : !llvm.vec<8 x f32>
+//      CHECK: %[[s38:.*]] = llvm.insertvalue %[[s37]], %[[s19]][1] : !llvm.array<4 x vec<8 x f32>>
+//      CHECK: %[[s39:.*]] = llvm.insertvalue %[[s38]], %[[B]][0] : !llvm.array<16 x array<4 x vec<8 x f32>>>
+//      CHECK: llvm.return %[[s39]] : !llvm.array<16 x array<4 x vec<8 x f32>>>
 
 func @extract_strides(%arg0: vector<3x3xf32>) -> vector<1x1xf32> {
   %0 = vector.extract_slices %arg0, [2, 2], [1, 1]
@@ -735,33 +735,33 @@ func @extract_strides(%arg0: vector<3x3xf32>) -> vector<1x1xf32> {
   return %1 : vector<1x1xf32>
 }
 // CHECK-LABEL: llvm.func @extract_strides(
-// CHECK-SAME: %[[A:.*]]: !llvm.array<3 x vec<3 x float>>)
-//      CHECK: %[[T1:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<1x1xf32>) : !llvm.array<1 x vec<1 x float>>
-//      CHECK: %[[T2:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<3 x vec<3 x float>>
-//      CHECK: %[[T3:.*]] = llvm.shufflevector %[[T2]], %[[T2]] [2] : !llvm.vec<3 x float>, !llvm.vec<3 x float>
-//      CHECK: %[[T4:.*]] = llvm.insertvalue %[[T3]], %[[T1]][0] : !llvm.array<1 x vec<1 x float>>
-//      CHECK: llvm.return %[[T4]] : !llvm.array<1 x vec<1 x float>>
+// CHECK-SAME: %[[A:.*]]: !llvm.array<3 x vec<3 x f32>>)
+//      CHECK: %[[T1:.*]] = llvm.mlir.constant(dense<0.000000e+00> : vector<1x1xf32>) : !llvm.array<1 x vec<1 x f32>>
+//      CHECK: %[[T2:.*]] = llvm.extractvalue %[[A]][2] : !llvm.array<3 x vec<3 x f32>>
+//      CHECK: %[[T3:.*]] = llvm.shufflevector %[[T2]], %[[T2]] [2] : !llvm.vec<3 x f32>, !llvm.vec<3 x f32>
+//      CHECK: %[[T4:.*]] = llvm.insertvalue %[[T3]], %[[T1]][0] : !llvm.array<1 x vec<1 x f32>>
+//      CHECK: llvm.return %[[T4]] : !llvm.array<1 x vec<1 x f32>>
 
 // CHECK-LABEL: llvm.func @vector_fma(
-//  CHECK-SAME: %[[A:.*]]: !llvm.vec<8 x float>, %[[B:.*]]: !llvm.array<2 x vec<4 x float>>)
-//  CHECK-SAME: -> !llvm.struct<(vec<8 x float>, array<2 x vec<4 x float>>)> {
+//  CHECK-SAME: %[[A:.*]]: !llvm.vec<8 x f32>, %[[B:.*]]: !llvm.array<2 x vec<4 x f32>>)
+//  CHECK-SAME: -> !llvm.struct<(vec<8 x f32>, array<2 x vec<4 x f32>>)> {
 func @vector_fma(%a: vector<8xf32>, %b: vector<2x4xf32>) -> (vector<8xf32>, vector<2x4xf32>) {
   //         CHECK: "llvm.intr.fmuladd"(%[[A]], %[[A]], %[[A]]) :
-  //    CHECK-SAME:   (!llvm.vec<8 x float>, !llvm.vec<8 x float>, !llvm.vec<8 x float>) -> !llvm.vec<8 x float>
+  //    CHECK-SAME:   (!llvm.vec<8 x f32>, !llvm.vec<8 x f32>, !llvm.vec<8 x f32>) -> !llvm.vec<8 x f32>
   %0 = vector.fma %a, %a, %a : vector<8xf32>
 
-  //       CHECK: %[[b00:.*]] = llvm.extractvalue %[[B]][0] : !llvm.array<2 x vec<4 x float>>
-  //       CHECK: %[[b01:.*]] = llvm.extractvalue %[[B]][0] : !llvm.array<2 x vec<4 x float>>
-  //       CHECK: %[[b02:.*]] = llvm.extractvalue %[[B]][0] : !llvm.array<2 x vec<4 x float>>
+  //       CHECK: %[[b00:.*]] = llvm.extractvalue %[[B]][0] : !llvm.array<2 x vec<4 x f32>>
+  //       CHECK: %[[b01:.*]] = llvm.extractvalue %[[B]][0] : !llvm.array<2 x vec<4 x f32>>
+  //       CHECK: %[[b02:.*]] = llvm.extractvalue %[[B]][0] : !llvm.array<2 x vec<4 x f32>>
   //       CHECK: %[[B0:.*]] = "llvm.intr.fmuladd"(%[[b00]], %[[b01]], %[[b02]]) :
-  //  CHECK-SAME: (!llvm.vec<4 x float>, !llvm.vec<4 x float>, !llvm.vec<4 x float>) -> !llvm.vec<4 x float>
-  //       CHECK: llvm.insertvalue %[[B0]], {{.*}}[0] : !llvm.array<2 x vec<4 x float>>
-  //       CHECK: %[[b10:.*]] = llvm.extractvalue %[[B]][1] : !llvm.array<2 x vec<4 x float>>
-  //       CHECK: %[[b11:.*]] = llvm.extractvalue %[[B]][1] : !llvm.array<2 x vec<4 x float>>
-  //       CHECK: %[[b12:.*]] = llvm.extractvalue %[[B]][1] : !llvm.array<2 x vec<4 x float>>
+  //  CHECK-SAME: (!llvm.vec<4 x f32>, !llvm.vec<4 x f32>, !llvm.vec<4 x f32>) -> !llvm.vec<4 x f32>
+  //       CHECK: llvm.insertvalue %[[B0]], {{.*}}[0] : !llvm.array<2 x vec<4 x f32>>
+  //       CHECK: %[[b10:.*]] = llvm.extractvalue %[[B]][1] : !llvm.array<2 x vec<4 x f32>>
+  //       CHECK: %[[b11:.*]] = llvm.extractvalue %[[B]][1] : !llvm.array<2 x vec<4 x f32>>
+  //       CHECK: %[[b12:.*]] = llvm.extractvalue %[[B]][1] : !llvm.array<2 x vec<4 x f32>>
   //       CHECK: %[[B1:.*]] = "llvm.intr.fmuladd"(%[[b10]], %[[b11]], %[[b12]]) :
-  //  CHECK-SAME: (!llvm.vec<4 x float>, !llvm.vec<4 x float>, !llvm.vec<4 x float>) -> !llvm.vec<4 x float>
-  //       CHECK: llvm.insertvalue %[[B1]], {{.*}}[1] : !llvm.array<2 x vec<4 x float>>
+  //  CHECK-SAME: (!llvm.vec<4 x f32>, !llvm.vec<4 x f32>, !llvm.vec<4 x f32>) -> !llvm.vec<4 x f32>
+  //       CHECK: llvm.insertvalue %[[B1]], {{.*}}[1] : !llvm.array<2 x vec<4 x f32>>
   %1 = vector.fma %b, %b, %b : vector<2x4xf32>
 
   return %0, %1: vector<8xf32>, vector<2x4xf32>
@@ -772,33 +772,33 @@ func @reduce_f16(%arg0: vector<16xf16>) -> f16 {
   return %0 : f16
 }
 // CHECK-LABEL: llvm.func @reduce_f16(
-// CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x half>)
-//      CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f16) : !llvm.half
+// CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x f16>)
+//      CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f16) : f16
 //      CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]])
-// CHECK-SAME: {reassoc = false} : (!llvm.half, !llvm.vec<16 x half>) -> !llvm.half
-//      CHECK: llvm.return %[[V]] : !llvm.half
+// CHECK-SAME: {reassoc = false} : (f16, !llvm.vec<16 x f16>) -> f16
+//      CHECK: llvm.return %[[V]] : f16
 
 func @reduce_f32(%arg0: vector<16xf32>) -> f32 {
   %0 = vector.reduction "add", %arg0 : vector<16xf32> into f32
   return %0 : f32
 }
 // CHECK-LABEL: llvm.func @reduce_f32(
-// CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x float>)
-//      CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : !llvm.float
+// CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x f32>)
+//      CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f32) : f32
 //      CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]])
-// CHECK-SAME: {reassoc = false} : (!llvm.float, !llvm.vec<16 x float>) -> !llvm.float
-//      CHECK: llvm.return %[[V]] : !llvm.float
+// CHECK-SAME: {reassoc = false} : (f32, !llvm.vec<16 x f32>) -> f32
+//      CHECK: llvm.return %[[V]] : f32
 
 func @reduce_f64(%arg0: vector<16xf64>) -> f64 {
   %0 = vector.reduction "add", %arg0 : vector<16xf64> into f64
   return %0 : f64
 }
 // CHECK-LABEL: llvm.func @reduce_f64(
-// CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x double>)
-//      CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f64) : !llvm.double
+// CHECK-SAME: %[[A:.*]]: !llvm.vec<16 x f64>)
+//      CHECK: %[[C:.*]] = llvm.mlir.constant(0.000000e+00 : f64) : f64
 //      CHECK: %[[V:.*]] = "llvm.intr.vector.reduce.fadd"(%[[C]], %[[A]])
-// CHECK-SAME: {reassoc = false} : (!llvm.double, !llvm.vec<16 x double>) -> !llvm.double
-//      CHECK: llvm.return %[[V]] : !llvm.double
+// CHECK-SAME: {reassoc = false} : (f64, !llvm.vec<16 x f64>) -> f64
+//      CHECK: llvm.return %[[V]] : f64
 
 func @reduce_i8(%arg0: vector<16xi8>) -> i8 {
   %0 = vector.reduction "add", %arg0 : vector<16xi8> into i8
@@ -838,7 +838,7 @@ func @matrix_ops(%A: vector<64xf64>, %B: vector<48xf64>) -> vector<12xf64> {
 // CHECK-LABEL: llvm.func @matrix_ops
 //       CHECK:   llvm.intr.matrix.multiply %{{.*}}, %{{.*}} {
 //  CHECK-SAME: lhs_columns = 16 : i32, lhs_rows = 4 : i32, rhs_columns = 3 : i32
-//  CHECK-SAME: } : (!llvm.vec<64 x double>, !llvm.vec<48 x double>) -> !llvm.vec<12 x double>
+//  CHECK-SAME: } : (!llvm.vec<64 x f64>, !llvm.vec<48 x f64>) -> !llvm.vec<12 x f64>
 
 func @transfer_read_1d(%A : memref<?xf32>, %base: index) -> vector<17xf32> {
   %f7 = constant 7.0: f32
@@ -851,15 +851,15 @@ func @transfer_read_1d(%A : memref<?xf32>, %base: index) -> vector<17xf32> {
   return %f: vector<17xf32>
 }
 // CHECK-LABEL: func @transfer_read_1d
-//  CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: i64) -> !llvm.vec<17 x float>
+//  CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: i64) -> !llvm.vec<17 x f32>
 //
 // 1. Bitcast to vector form.
 //       CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}} :
-//  CHECK-SAME: (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+//  CHECK-SAME: (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
 //       CHECK: %[[vecPtr:.*]] = llvm.bitcast %[[gep]] :
-//  CHECK-SAME: !llvm.ptr<float> to !llvm.ptr<vec<17 x float>>
+//  CHECK-SAME: !llvm.ptr<f32> to !llvm.ptr<vec<17 x f32>>
 //       CHECK: %[[DIM:.*]] = llvm.extractvalue %{{.*}}[3, 0] :
-//  CHECK-SAME: !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
+//  CHECK-SAME: !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
 //
 // 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
 //       CHECK: %[[linearIndex:.*]] = llvm.mlir.constant(dense
@@ -897,17 +897,17 @@ func @transfer_read_1d(%A : memref<?xf32>, %base: index) -> vector<17xf32> {
 //
 // 5. Rewrite as a masked read.
 //       CHECK: %[[PASS_THROUGH:.*]] =  llvm.mlir.constant(dense<7.000000e+00> :
-//  CHECK-SAME:  vector<17xf32>) : !llvm.vec<17 x float>
+//  CHECK-SAME:  vector<17xf32>) : !llvm.vec<17 x f32>
 //       CHECK: %[[loaded:.*]] = llvm.intr.masked.load %[[vecPtr]], %[[mask]],
 //  CHECK-SAME: %[[PASS_THROUGH]] {alignment = 4 : i32} :
-//  CHECK-SAME: (!llvm.ptr<vec<17 x float>>, !llvm.vec<17 x i1>, !llvm.vec<17 x float>) -> !llvm.vec<17 x float>
+//  CHECK-SAME: (!llvm.ptr<vec<17 x f32>>, !llvm.vec<17 x i1>, !llvm.vec<17 x f32>) -> !llvm.vec<17 x f32>
 
 //
 // 1. Bitcast to vector form.
 //       CHECK: %[[gep_b:.*]] = llvm.getelementptr {{.*}} :
-//  CHECK-SAME: (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+//  CHECK-SAME: (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
 //       CHECK: %[[vecPtr_b:.*]] = llvm.bitcast %[[gep_b]] :
-//  CHECK-SAME: !llvm.ptr<float> to !llvm.ptr<vec<17 x float>>
+//  CHECK-SAME: !llvm.ptr<f32> to !llvm.ptr<vec<17 x f32>>
 //
 // 2. Create a vector with linear indices [ 0 .. vector_length - 1 ].
 //       CHECK: %[[linearIndex_b:.*]] = llvm.mlir.constant(dense
@@ -932,7 +932,7 @@ func @transfer_read_1d(%A : memref<?xf32>, %base: index) -> vector<17xf32> {
 // 5. Rewrite as a masked write.
 //       CHECK: llvm.intr.masked.store %[[loaded]], %[[vecPtr_b]], %[[mask_b]]
 //  CHECK-SAME: {alignment = 4 : i32} :
-//  CHECK-SAME: !llvm.vec<17 x float>, !llvm.vec<17 x i1> into !llvm.ptr<vec<17 x float>>
+//  CHECK-SAME: !llvm.vec<17 x f32>, !llvm.vec<17 x i1> into !llvm.ptr<vec<17 x f32>>
 
 func @transfer_read_2d_to_1d(%A : memref<?x?xf32>, %base0: index, %base1: index) -> vector<17xf32> {
   %f7 = constant 7.0: f32
@@ -942,9 +942,9 @@ func @transfer_read_2d_to_1d(%A : memref<?x?xf32>, %base0: index, %base1: index)
   return %f: vector<17xf32>
 }
 // CHECK-LABEL: func @transfer_read_2d_to_1d
-//  CHECK-SAME: %[[BASE_0:[a-zA-Z0-9]*]]: i64, %[[BASE_1:[a-zA-Z0-9]*]]: i64) -> !llvm.vec<17 x float>
+//  CHECK-SAME: %[[BASE_0:[a-zA-Z0-9]*]]: i64, %[[BASE_1:[a-zA-Z0-9]*]]: i64) -> !llvm.vec<17 x f32>
 //       CHECK: %[[DIM:.*]] = llvm.extractvalue %{{.*}}[3, 1] :
-//  CHECK-SAME: !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+//  CHECK-SAME: !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 //
 // Create offsetVector = [ offset + 0 .. offset + vector_length - 1 ].
 //       CHECK: %[[trunc:.*]] = llvm.trunc %[[BASE_1]] : i64 to i32
@@ -982,23 +982,23 @@ func @transfer_read_1d_non_zero_addrspace(%A : memref<?xf32, 3>, %base: index) -
   return %f: vector<17xf32>
 }
 // CHECK-LABEL: func @transfer_read_1d_non_zero_addrspace
-//  CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: i64) -> !llvm.vec<17 x float>
+//  CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: i64) -> !llvm.vec<17 x f32>
 //
 // 1. Check address space for GEP is correct.
 //       CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}} :
-//  CHECK-SAME: (!llvm.ptr<float, 3>, i64) -> !llvm.ptr<float, 3>
+//  CHECK-SAME: (!llvm.ptr<f32, 3>, i64) -> !llvm.ptr<f32, 3>
 //       CHECK: %[[vecPtr:.*]] = llvm.addrspacecast %[[gep]] :
-//  CHECK-SAME: !llvm.ptr<float, 3> to !llvm.ptr<vec<17 x float>>
+//  CHECK-SAME: !llvm.ptr<f32, 3> to !llvm.ptr<vec<17 x f32>>
 //
 // 2. Check address space of the memref is correct.
 //       CHECK: %[[DIM:.*]] = llvm.extractvalue %{{.*}}[3, 0] :
-//  CHECK-SAME: !llvm.struct<(ptr<float, 3>, ptr<float, 3>, i64, array<1 x i64>, array<1 x i64>)>
+//  CHECK-SAME: !llvm.struct<(ptr<f32, 3>, ptr<f32, 3>, i64, array<1 x i64>, array<1 x i64>)>
 //
 // 3. Check address space for GEP is correct.
 //       CHECK: %[[gep_b:.*]] = llvm.getelementptr {{.*}} :
-//  CHECK-SAME: (!llvm.ptr<float, 3>, i64) -> !llvm.ptr<float, 3>
+//  CHECK-SAME: (!llvm.ptr<f32, 3>, i64) -> !llvm.ptr<f32, 3>
 //       CHECK: %[[vecPtr_b:.*]] = llvm.addrspacecast %[[gep_b]] :
-//  CHECK-SAME: !llvm.ptr<float, 3> to !llvm.ptr<vec<17 x float>>
+//  CHECK-SAME: !llvm.ptr<f32, 3> to !llvm.ptr<vec<17 x f32>>
 
 func @transfer_read_1d_not_masked(%A : memref<?xf32>, %base: index) -> vector<17xf32> {
   %f7 = constant 7.0: f32
@@ -1007,16 +1007,16 @@ func @transfer_read_1d_not_masked(%A : memref<?xf32>, %base: index) -> vector<17
   return %f: vector<17xf32>
 }
 // CHECK-LABEL: func @transfer_read_1d_not_masked
-//  CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: i64) -> !llvm.vec<17 x float>
+//  CHECK-SAME: %[[BASE:[a-zA-Z0-9]*]]: i64) -> !llvm.vec<17 x f32>
 //
 // 1. Bitcast to vector form.
 //       CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}} :
-//  CHECK-SAME: (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
+//  CHECK-SAME: (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
 //       CHECK: %[[vecPtr:.*]] = llvm.bitcast %[[gep]] :
-//  CHECK-SAME: !llvm.ptr<float> to !llvm.ptr<vec<17 x float>>
+//  CHECK-SAME: !llvm.ptr<f32> to !llvm.ptr<vec<17 x f32>>
 //
 // 2. Rewrite as a load.
-//       CHECK: %[[loaded:.*]] = llvm.load %[[vecPtr]] {alignment = 4 : i64} : !llvm.ptr<vec<17 x float>>
+//       CHECK: %[[loaded:.*]] = llvm.load %[[vecPtr]] {alignment = 4 : i64} : !llvm.ptr<vec<17 x f32>>
 
 func @transfer_read_1d_cast(%A : memref<?xi32>, %base: index) -> vector<12xi8> {
   %c0 = constant 0: i32
@@ -1063,11 +1063,11 @@ func @flat_transpose(%arg0: vector<16xf32>) -> vector<16xf32> {
 }
 
 // CHECK-LABEL: func @flat_transpose
-// CHECK-SAME:  %[[A:.*]]: !llvm.vec<16 x float>
+// CHECK-SAME:  %[[A:.*]]: !llvm.vec<16 x f32>
 // CHECK:       %[[T:.*]] = llvm.intr.matrix.transpose %[[A]]
 // CHECK-SAME:      {columns = 4 : i32, rows = 4 : i32} :
-// CHECK-SAME:      !llvm.vec<16 x float> into !llvm.vec<16 x float>
-// CHECK:       llvm.return %[[T]] : !llvm.vec<16 x float>
+// CHECK-SAME:      !llvm.vec<16 x f32> into !llvm.vec<16 x f32>
+// CHECK:       llvm.return %[[T]] : !llvm.vec<16 x f32>
 
 func @masked_load_op(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) -> vector<16xf32> {
   %0 = vector.maskedload %arg0, %arg1, %arg2 : memref<?xf32>, vector<16xi1>, vector<16xf32> into vector<16xf32>
@@ -1075,9 +1075,9 @@ func @masked_load_op(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<1
 }
 
 // CHECK-LABEL: func @masked_load_op
-// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[] : (!llvm.ptr<vec<16 x float>>) -> !llvm.ptr<vec<16 x float>>
-// CHECK: %[[L:.*]] = llvm.intr.masked.load %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.ptr<vec<16 x float>>, !llvm.vec<16 x i1>, !llvm.vec<16 x float>) -> !llvm.vec<16 x float>
-// CHECK: llvm.return %[[L]] : !llvm.vec<16 x float>
+// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[] : (!llvm.ptr<vec<16 x f32>>) -> !llvm.ptr<vec<16 x f32>>
+// CHECK: %[[L:.*]] = llvm.intr.masked.load %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.ptr<vec<16 x f32>>, !llvm.vec<16 x i1>, !llvm.vec<16 x f32>) -> !llvm.vec<16 x f32>
+// CHECK: llvm.return %[[L]] : !llvm.vec<16 x f32>
 
 func @masked_store_op(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<16xf32>) {
   vector.maskedstore %arg0, %arg1, %arg2 : vector<16xi1>, vector<16xf32> into memref<?xf32>
@@ -1085,8 +1085,8 @@ func @masked_store_op(%arg0: memref<?xf32>, %arg1: vector<16xi1>, %arg2: vector<
 }
 
 // CHECK-LABEL: func @masked_store_op
-// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[] : (!llvm.ptr<vec<16 x float>>) -> !llvm.ptr<vec<16 x float>>
-// CHECK: llvm.intr.masked.store %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : !llvm.vec<16 x float>, !llvm.vec<16 x i1> into !llvm.ptr<vec<16 x float>>
+// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[] : (!llvm.ptr<vec<16 x f32>>) -> !llvm.ptr<vec<16 x f32>>
+// CHECK: llvm.intr.masked.store %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : !llvm.vec<16 x f32>, !llvm.vec<16 x i1> into !llvm.ptr<vec<16 x f32>>
 // CHECK: llvm.return
 
 func @gather_op(%arg0: memref<?xf32>, %arg1: vector<3xi32>, %arg2: vector<3xi1>, %arg3: vector<3xf32>) -> vector<3xf32> {
@@ -1095,9 +1095,9 @@ func @gather_op(%arg0: memref<?xf32>, %arg1: vector<3xi32>, %arg2: vector<3xi1>,
 }
 
 // CHECK-LABEL: func @gather_op
-// CHECK: %[[P:.*]] = llvm.getelementptr {{.*}}[%{{.*}}] : (!llvm.ptr<float>, !llvm.vec<3 x i32>) -> !llvm.vec<3 x ptr<float>>
-// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec<3 x ptr<float>>, !llvm.vec<3 x i1>, !llvm.vec<3 x float>) -> !llvm.vec<3 x float>
-// CHECK: llvm.return %[[G]] : !llvm.vec<3 x float>
+// CHECK: %[[P:.*]] = llvm.getelementptr {{.*}}[%{{.*}}] : (!llvm.ptr<f32>, !llvm.vec<3 x i32>) -> !llvm.vec<3 x ptr<f32>>
+// CHECK: %[[G:.*]] = llvm.intr.masked.gather %[[P]], %{{.*}}, %{{.*}} {alignment = 4 : i32} : (!llvm.vec<3 x ptr<f32>>, !llvm.vec<3 x i1>, !llvm.vec<3 x f32>) -> !llvm.vec<3 x f32>
+// CHECK: llvm.return %[[G]] : !llvm.vec<3 x f32>
 
 func @scatter_op(%arg0: memref<?xf32>, %arg1: vector<3xi32>, %arg2: vector<3xi1>, %arg3: vector<3xf32>) {
   vector.scatter %arg0, %arg1, %arg2, %arg3 : vector<3xi32>, vector<3xi1>, vector<3xf32> into memref<?xf32>
@@ -1105,8 +1105,8 @@ func @scatter_op(%arg0: memref<?xf32>, %arg1: vector<3xi32>, %arg2: vector<3xi1>
 }
 
 // CHECK-LABEL: func @scatter_op
-// CHECK: %[[P:.*]] = llvm.getelementptr {{.*}}[%{{.*}}] : (!llvm.ptr<float>, !llvm.vec<3 x i32>) -> !llvm.vec<3 x ptr<float>>
-// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : !llvm.vec<3 x float>, !llvm.vec<3 x i1> into !llvm.vec<3 x ptr<float>>
+// CHECK: %[[P:.*]] = llvm.getelementptr {{.*}}[%{{.*}}] : (!llvm.ptr<f32>, !llvm.vec<3 x i32>) -> !llvm.vec<3 x ptr<f32>>
+// CHECK: llvm.intr.masked.scatter %{{.*}}, %[[P]], %{{.*}} {alignment = 4 : i32} : !llvm.vec<3 x f32>, !llvm.vec<3 x i1> into !llvm.vec<3 x ptr<f32>>
 // CHECK: llvm.return
 
 func @expand_load_op(%arg0: memref<?xf32>, %arg1: vector<11xi1>, %arg2: vector<11xf32>) -> vector<11xf32> {
@@ -1115,9 +1115,9 @@ func @expand_load_op(%arg0: memref<?xf32>, %arg1: vector<11xi1>, %arg2: vector<1
 }
 
 // CHECK-LABEL: func @expand_load_op
-// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[] : (!llvm.ptr<float>) -> !llvm.ptr<float>
-// CHECK: %[[E:.*]] = "llvm.intr.masked.expandload"(%[[P]], %{{.*}}, %{{.*}}) : (!llvm.ptr<float>, !llvm.vec<11 x i1>, !llvm.vec<11 x float>) -> !llvm.vec<11 x float>
-// CHECK: llvm.return %[[E]] : !llvm.vec<11 x float>
+// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[] : (!llvm.ptr<f32>) -> !llvm.ptr<f32>
+// CHECK: %[[E:.*]] = "llvm.intr.masked.expandload"(%[[P]], %{{.*}}, %{{.*}}) : (!llvm.ptr<f32>, !llvm.vec<11 x i1>, !llvm.vec<11 x f32>) -> !llvm.vec<11 x f32>
+// CHECK: llvm.return %[[E]] : !llvm.vec<11 x f32>
 
 func @compress_store_op(%arg0: memref<?xf32>, %arg1: vector<11xi1>, %arg2: vector<11xf32>) {
   vector.compressstore %arg0, %arg1, %arg2 : memref<?xf32>, vector<11xi1>, vector<11xf32>
@@ -1125,6 +1125,6 @@ func @compress_store_op(%arg0: memref<?xf32>, %arg1: vector<11xi1>, %arg2: vecto
 }
 
 // CHECK-LABEL: func @compress_store_op
-// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[] : (!llvm.ptr<float>) -> !llvm.ptr<float>
-// CHECK: "llvm.intr.masked.compressstore"(%{{.*}}, %[[P]], %{{.*}}) : (!llvm.vec<11 x float>, !llvm.ptr<float>, !llvm.vec<11 x i1>) -> ()
+// CHECK: %[[P:.*]] = llvm.getelementptr %{{.*}}[] : (!llvm.ptr<f32>) -> !llvm.ptr<f32>
+// CHECK: "llvm.intr.masked.compressstore"(%{{.*}}, %[[P]], %{{.*}}) : (!llvm.vec<11 x f32>, !llvm.ptr<f32>, !llvm.vec<11 x i1>) -> ()
 // CHECK: llvm.return

diff  --git a/mlir/test/Conversion/VectorToROCDL/vector-to-rocdl.mlir b/mlir/test/Conversion/VectorToROCDL/vector-to-rocdl.mlir
index f8522c896008..f8483dcc7f80 100644
--- a/mlir/test/Conversion/VectorToROCDL/vector-to-rocdl.mlir
+++ b/mlir/test/Conversion/VectorToROCDL/vector-to-rocdl.mlir
@@ -9,7 +9,7 @@ func @transfer_readx2(%A : memref<?xf32>, %base: index) -> vector<2xf32> {
   return %f: vector<2xf32>
 }
 // CHECK-LABEL: @transfer_readx2
-// CHECK: rocdl.buffer.load {{.*}} !llvm.vec<2 x float>
+// CHECK: rocdl.buffer.load {{.*}} !llvm.vec<2 x f32>
 
 func @transfer_readx4(%A : memref<?xf32>, %base: index) -> vector<4xf32> {
   %f0 = constant 0.0: f32
@@ -19,7 +19,7 @@ func @transfer_readx4(%A : memref<?xf32>, %base: index) -> vector<4xf32> {
   return %f: vector<4xf32>
 }
 // CHECK-LABEL: @transfer_readx4
-// CHECK: rocdl.buffer.load {{.*}} !llvm.vec<4 x float>
+// CHECK: rocdl.buffer.load {{.*}} !llvm.vec<4 x f32>
 
 func @transfer_read_dwordConfig(%A : memref<?xf32>, %base: index) -> vector<4xf32> {
   %f0 = constant 0.0: f32
@@ -43,7 +43,7 @@ func @transfer_writex2(%A : memref<?xf32>, %B : vector<2xf32>, %base: index) {
   return
 }
 // CHECK-LABEL: @transfer_writex2
-// CHECK: rocdl.buffer.store {{.*}} !llvm.vec<2 x float>
+// CHECK: rocdl.buffer.store {{.*}} !llvm.vec<2 x f32>
 
 func @transfer_writex4(%A : memref<?xf32>, %B : vector<4xf32>, %base: index) {
   vector.transfer_write %B, %A[%base]
@@ -52,7 +52,7 @@ func @transfer_writex4(%A : memref<?xf32>, %B : vector<4xf32>, %base: index) {
   return
 }
 // CHECK-LABEL: @transfer_writex4
-// CHECK: rocdl.buffer.store {{.*}} !llvm.vec<4 x float>
+// CHECK: rocdl.buffer.store {{.*}} !llvm.vec<4 x f32>
 
 func @transfer_write_dwordConfig(%A : memref<?xf32>, %B : vector<2xf32>, %base: index) {
   vector.transfer_write %B, %A[%base]

diff  --git a/mlir/test/Dialect/GPU/invalid.mlir b/mlir/test/Dialect/GPU/invalid.mlir
index 1f6058cde397..4879c51479e0 100644
--- a/mlir/test/Dialect/GPU/invalid.mlir
+++ b/mlir/test/Dialect/GPU/invalid.mlir
@@ -87,7 +87,7 @@ module attributes {gpu.container_module} {
 module attributes {gpu.container_module} {
   module @kernels {
     // expected-error at +1 {{'gpu.func' op expects parent op 'gpu.module'}}
-    gpu.func @kernel_1(%arg1 : !llvm.ptr<float>) {
+    gpu.func @kernel_1(%arg1 : !llvm.ptr<f32>) {
       gpu.return
     }
   }
@@ -122,14 +122,14 @@ module attributes {gpu.container_module} {
 
 module attributes {gpu.container_module} {
   module @kernels {
-    gpu.func @kernel_1(%arg1 : !llvm.ptr<float>) kernel {
+    gpu.func @kernel_1(%arg1 : !llvm.ptr<f32>) kernel {
       gpu.return
     }
   }
 
-  func @launch_func_missing_kernel_attr(%sz : index, %arg : !llvm.ptr<float>) {
+  func @launch_func_missing_kernel_attr(%sz : index, %arg : !llvm.ptr<f32>) {
     // expected-error at +1 {{kernel module 'kernels' is undefined}}
-    gpu.launch_func @kernels::@kernel_1 blocks in (%sz, %sz, %sz) threads in (%sz, %sz, %sz) args(%arg : !llvm.ptr<float>)
+    gpu.launch_func @kernels::@kernel_1 blocks in (%sz, %sz, %sz) threads in (%sz, %sz, %sz) args(%arg : !llvm.ptr<f32>)
     return
   }
 }
@@ -138,14 +138,14 @@ module attributes {gpu.container_module} {
 
 module attributes {gpu.container_module} {
   gpu.module @kernels {
-    gpu.func @kernel_1(%arg1 : !llvm.ptr<float>) {
+    gpu.func @kernel_1(%arg1 : !llvm.ptr<f32>) {
       gpu.return
     }
   }
 
-  func @launch_func_missing_kernel_attr(%sz : index, %arg : !llvm.ptr<float>) {
+  func @launch_func_missing_kernel_attr(%sz : index, %arg : !llvm.ptr<f32>) {
     // expected-error at +1 {{kernel function is missing the 'gpu.kernel' attribute}}
-    gpu.launch_func @kernels::@kernel_1 blocks in (%sz, %sz, %sz) threads in (%sz, %sz, %sz) args(%arg : !llvm.ptr<float>)
+    gpu.launch_func @kernels::@kernel_1 blocks in (%sz, %sz, %sz) threads in (%sz, %sz, %sz) args(%arg : !llvm.ptr<f32>)
     return
   }
 }
@@ -154,14 +154,14 @@ module attributes {gpu.container_module} {
 
 module attributes {gpu.container_module} {
   gpu.module @kernels {
-    gpu.func @kernel_1(%arg1 : !llvm.ptr<float>) kernel {
+    gpu.func @kernel_1(%arg1 : !llvm.ptr<f32>) kernel {
       gpu.return
     }
   }
 
-  func @launch_func_kernel_operand_size(%sz : index, %arg : !llvm.ptr<float>) {
+  func @launch_func_kernel_operand_size(%sz : index, %arg : !llvm.ptr<f32>) {
     // expected-error at +1 {{got 2 kernel operands but expected 1}}
-    gpu.launch_func @kernels::@kernel_1 blocks in (%sz, %sz, %sz) threads in (%sz, %sz, %sz) args(%arg : !llvm.ptr<float>, %arg : !llvm.ptr<float>)
+    gpu.launch_func @kernels::@kernel_1 blocks in (%sz, %sz, %sz) threads in (%sz, %sz, %sz) args(%arg : !llvm.ptr<f32>, %arg : !llvm.ptr<f32>)
     return
   }
 }

diff  --git a/mlir/test/Dialect/GPU/multiple-all-reduce.mlir b/mlir/test/Dialect/GPU/multiple-all-reduce.mlir
index a084faec29c8..497adb50c142 100644
--- a/mlir/test/Dialect/GPU/multiple-all-reduce.mlir
+++ b/mlir/test/Dialect/GPU/multiple-all-reduce.mlir
@@ -18,8 +18,8 @@ func @main() {
   }
 
 // CHECK:      gpu.module @main_kernel {
-// CHECK-NEXT:   llvm.mlir.global internal @{{.*}}() {addr_space = 3 : i32} : !llvm.array<32 x float>
-// CHECK-NEXT:   llvm.mlir.global internal @{{.*}}() {addr_space = 3 : i32} : !llvm.array<32 x float>
+// CHECK-NEXT:   llvm.mlir.global internal @{{.*}}() {addr_space = 3 : i32} : !llvm.array<32 x f32>
+// CHECK-NEXT:   llvm.mlir.global internal @{{.*}}() {addr_space = 3 : i32} : !llvm.array<32 x f32>
 
   return
 }

diff  --git a/mlir/test/Dialect/LLVMIR/dialect-cast.mlir b/mlir/test/Dialect/LLVMIR/dialect-cast.mlir
index bf8f94892664..fb05a7060a2a 100644
--- a/mlir/test/Dialect/LLVMIR/dialect-cast.mlir
+++ b/mlir/test/Dialect/LLVMIR/dialect-cast.mlir
@@ -9,15 +9,11 @@ func @mlir_dialect_cast(%0: index, %1: i32, %2: bf16, %3: f16, %4: f32, %5: f64,
                         %10: memref<*xf32>) {
   llvm.mlir.cast %0 : index to i64
   llvm.mlir.cast %0 : index to i32
-  llvm.mlir.cast %2 : bf16 to !llvm.bfloat
-  llvm.mlir.cast %3 : f16 to !llvm.half
-  llvm.mlir.cast %4 : f32 to !llvm.float
-  llvm.mlir.cast %5 : f64 to !llvm.double
-  llvm.mlir.cast %6 : vector<42xf32> to !llvm.vec<42xfloat>
-  llvm.mlir.cast %7 : memref<42xf32> to !llvm.ptr<float>
-  llvm.mlir.cast %7 : memref<42xf32> to !llvm.struct<(ptr<float>, ptr<float>, i64, array<1xi64>, array<1xi64>)>
-  llvm.mlir.cast %8 : memref<?xf32> to !llvm.struct<(ptr<float>, ptr<float>, i64, array<1xi64>, array<1xi64>)>
-  llvm.mlir.cast %9 : memref<f32> to !llvm.struct<(ptr<float>, ptr<float>, i64)>
+  llvm.mlir.cast %6 : vector<42xf32> to !llvm.vec<42xf32>
+  llvm.mlir.cast %7 : memref<42xf32> to !llvm.ptr<f32>
+  llvm.mlir.cast %7 : memref<42xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1xi64>, array<1xi64>)>
+  llvm.mlir.cast %8 : memref<?xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1xi64>, array<1xi64>)>
+  llvm.mlir.cast %9 : memref<f32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
   llvm.mlir.cast %10 : memref<*xf32> to !llvm.struct<(i64, ptr<i8>)>
   return
 }
@@ -26,7 +22,7 @@ func @mlir_dialect_cast(%0: index, %1: i32, %2: bf16, %3: f16, %4: f32, %5: f64,
 
 func @mlir_dialect_cast_index_non_integer(%0 : index) {
   // expected-error at +1 {{invalid cast between index and non-integer type}}
-  %1 = llvm.mlir.cast %0 : index to !llvm.float
+  %1 = llvm.mlir.cast %0 : index to f32
 }
 
 // -----
@@ -34,86 +30,86 @@ func @mlir_dialect_cast_index_non_integer(%0 : index) {
 // Cast verifier is symmetric, so we only check the symmetry once by having an
 // std->llvm and llvm->std test. Everything else is std->llvm.
 
-func @mlir_dialect_cast_index_non_integer_symmetry(%0: !llvm.float) {
+func @mlir_dialect_cast_index_non_integer_symmetry(%0: f32) {
   // expected-error at +1 {{invalid cast between index and non-integer type}}
-  llvm.mlir.cast %0 : !llvm.float to index
+  llvm.mlir.cast %0 : f32 to index
 }
 
 // -----
 
 func @mlir_dialect_cast_f16(%0 : f16) {
-  // expected-error at +1 {{invalid cast between f16 and a type other than !llvm.half}}
-  llvm.mlir.cast %0 : f16 to !llvm.float
+  // expected-error at +1 {{unsupported cast}}
+  llvm.mlir.cast %0 : f16 to f32
 }
 
 // -----
 
 func @mlir_dialect_cast_bf16(%0 : bf16) {
-  // expected-error at +1 {{invalid cast between bf16 and a type other than !llvm.bfloat}}
-  llvm.mlir.cast %0 : bf16 to !llvm.half
+  // expected-error at +1 {{unsupported cast}}
+  llvm.mlir.cast %0 : bf16 to f16
 }
 
 // -----
 
 func @mlir_dialect_cast_f32(%0 : f32) {
-  // expected-error at +1 {{invalid cast between f32 and a type other than !llvm.float}}
-  llvm.mlir.cast %0 : f32 to !llvm.bfloat
+  // expected-error at +1 {{unsupported cast}}
+  llvm.mlir.cast %0 : f32 to bf16
 }
 
 // -----
 
 func @mlir_dialect_cast_f64(%0 : f64) {
-  // expected-error at +1 {{invalid cast between f64 and a type other than !llvm.double}}
-  llvm.mlir.cast %0 : f64 to !llvm.float
+  // expected-error at +1 {{unsupported cast}}
+  llvm.mlir.cast %0 : f64 to f32
 }
 
 // -----
 
 func @mlir_dialect_cast_integer_non_integer(%0 : i16) {
   // expected-error at +1 {{unsupported cast}}
-  llvm.mlir.cast %0 : i16 to !llvm.half
+  llvm.mlir.cast %0 : i16 to f16
 }
 
 // -----
 
 func @mlir_dialect_cast_nd_vector(%0 : vector<2x2xf32>) {
   // expected-error at +1 {{only 1-d vector is allowed}}
-  llvm.mlir.cast %0 : vector<2x2xf32> to !llvm.vec<4xfloat>
+  llvm.mlir.cast %0 : vector<2x2xf32> to !llvm.vec<4xf32>
 }
 
 // -----
 
 func @mlir_dialect_cast_scalable_vector(%0 : vector<2xf32>) {
   // expected-error at +1 {{only fixed-sized vector is allowed}}
-  llvm.mlir.cast %0 : vector<2xf32> to !llvm.vec<?x2xfloat>
+  llvm.mlir.cast %0 : vector<2xf32> to !llvm.vec<?x2xf32>
 }
 
 // -----
 
 func @mlir_dialect_cast_vector_size_mismatch(%0 : vector<2xf32>) {
   // expected-error at +1 {{invalid cast between vectors with mismatching sizes}}
-  llvm.mlir.cast %0 : vector<2xf32> to !llvm.vec<4xfloat>
+  llvm.mlir.cast %0 : vector<2xf32> to !llvm.vec<4xf32>
 }
 
 // -----
 
 func @mlir_dialect_cast_dynamic_memref_bare_ptr(%0 : memref<?xf32>) {
   // expected-error at +1 {{unexpected bare pointer for dynamically shaped memref}}
-  llvm.mlir.cast %0 : memref<?xf32> to !llvm.ptr<float>
+  llvm.mlir.cast %0 : memref<?xf32> to !llvm.ptr<f32>
 }
 
 // -----
 
 func @mlir_dialect_cast_memref_bare_ptr_space(%0 : memref<4xf32, 4>) {
   // expected-error at +1 {{invalid conversion between memref and pointer in different memory spaces}}
-  llvm.mlir.cast %0 : memref<4xf32, 4> to !llvm.ptr<float, 3>
+  llvm.mlir.cast %0 : memref<4xf32, 4> to !llvm.ptr<f32, 3>
 }
 
 // -----
 
 func @mlir_dialect_cast_memref_no_descriptor(%0 : memref<?xf32>) {
   // expected-error at +1 {{invalid cast between a memref and a type other than pointer or memref descriptor}}
-  llvm.mlir.cast %0 : memref<?xf32> to !llvm.float
+  llvm.mlir.cast %0 : memref<?xf32> to f32
 }
 
 // -----
@@ -134,98 +130,91 @@ func @mlir_dialect_cast_0d_memref_descriptor_wrong_num_elements(%0 : memref<f32>
 
 func @mlir_dialect_cast_memref_descriptor_allocated(%0 : memref<?xf32>) {
   // expected-error at +1 {{expected first element of a memref descriptor to be a pointer in the address space of the memref}}
-  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(float, float, float, float, float)>
+  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(f32, f32, f32, f32, f32)>
 }
 
 // -----
 
 func @mlir_dialect_cast_memref_descriptor_allocated_wrong_space(%0 : memref<?xf32>) {
   // expected-error at +1 {{expected first element of a memref descriptor to be a pointer in the address space of the memref}}
-  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<float, 2>, float, float, float, float)>
+  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32, 2>, f32, f32, f32, f32)>
 }
 
 // -----
 
 func @mlir_dialect_cast_memref_descriptor_aligned(%0 : memref<?xf32>) {
   // expected-error at +1 {{expected second element of a memref descriptor to be a pointer in the address space of the memref}}
-  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<float>, float, float, float, float)>
+  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32>, f32, f32, f32, f32)>
 }
 
 // -----
 
 func @mlir_dialect_cast_memref_descriptor_aligned_wrong_space(%0 : memref<?xf32>) {
   // expected-error at +1 {{expected second element of a memref descriptor to be a pointer in the address space of the memref}}
-  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<float>, ptr<float, 2>, float, float, float)>
+  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32>, ptr<f32, 2>, f32, f32, f32)>
 }
 
 // -----
 
 func @mlir_dialect_cast_memref_descriptor_offset(%0 : memref<?xf32>) {
   // expected-error at +1 {{expected third element of a memref descriptor to be index-compatible integers}}
-  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<float>, ptr<float>, float, float, float)>
+  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, f32, f32, f32)>
 }
 
 // -----
 
 func @mlir_dialect_cast_memref_descriptor_sizes(%0 : memref<?xf32>) {
   // expected-error at +1 {{expected fourth element of a memref descriptor to be an array of <rank> index-compatible integers}}
-  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<float>, ptr<float>, i64, float, float)>
+  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, f32, f32)>
 }
 
 // -----
 
 func @mlir_dialect_cast_memref_descriptor_sizes_wrong_type(%0 : memref<?xf32>) {
   // expected-error at +1 {{expected fourth element of a memref descriptor to be an array of <rank> index-compatible integers}}
-  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<float>, ptr<float>, i64, array<10xfloat>, float)>
+  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<10xf32>, f32)>
 }
 
 // -----
 
 func @mlir_dialect_cast_memref_descriptor_sizes_wrong_rank(%0 : memref<?xf32>) {
   // expected-error at +1 {{expected fourth element of a memref descriptor to be an array of <rank> index-compatible integers}}
-  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<float>, ptr<float>, i64, array<10xi64>, float)>
+  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<10xi64>, f32)>
 }
 
 // -----
 
 func @mlir_dialect_cast_memref_descriptor_strides(%0 : memref<?xf32>) {
   // expected-error at +1 {{expected fifth element of a memref descriptor to be an array of <rank> index-compatible integers}}
-  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<float>, ptr<float>, i64, array<1xi64>, float)>
+  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1xi64>, f32)>
 }
 
 // -----
 
 func @mlir_dialect_cast_memref_descriptor_strides_wrong_type(%0 : memref<?xf32>) {
   // expected-error at +1 {{expected fifth element of a memref descriptor to be an array of <rank> index-compatible integers}}
-  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<float>, ptr<float>, i64, array<1xi64>, array<10xfloat>)>
+  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1xi64>, array<10xf32>)>
 }
 
 // -----
 
 func @mlir_dialect_cast_memref_descriptor_strides_wrong_rank(%0 : memref<?xf32>) {
   // expected-error at +1 {{expected fifth element of a memref descriptor to be an array of <rank> index-compatible integers}}
-  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<float>, ptr<float>, i64, array<1xi64>, array<10xi64>)>
+  llvm.mlir.cast %0 : memref<?xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1xi64>, array<10xi64>)>
 }
 
 // -----
 
 func @mlir_dialect_cast_tensor(%0 : tensor<?xf32>) {
   // expected-error at +1 {{unsupported cast}}
-  llvm.mlir.cast %0 : tensor<?xf32> to !llvm.float
-}
-
-// -----
-
-func @mlir_dialect_cast_two_std_types(%0 : f32) {
-  // expected-error at +1 {{expected one LLVM type and one built-in type}}
-  llvm.mlir.cast %0 : f32 to f64
+  llvm.mlir.cast %0 : tensor<?xf32> to f32
 }
 
 // -----
 
 func @mlir_dialect_cast_unranked_memref(%0: memref<*xf32>) {
   // expected-error at +1 {{expected descriptor to be a struct with two elements}}
-  llvm.mlir.cast %0 : memref<*xf32> to !llvm.ptr<float>
+  llvm.mlir.cast %0 : memref<*xf32> to !llvm.ptr<f32>
 }
 
 // -----
@@ -239,12 +228,12 @@ func @mlir_dialect_cast_unranked_memref(%0: memref<*xf32>) {
 
 func @mlir_dialect_cast_unranked_rank(%0: memref<*xf32>) {
   // expected-error at +1 {{expected first element of a memref descriptor to be an index-compatible integer}}
-  llvm.mlir.cast %0 : memref<*xf32> to !llvm.struct<(float, float)>
+  llvm.mlir.cast %0 : memref<*xf32> to !llvm.struct<(f32, f32)>
 }
 
 // -----
 
 func @mlir_dialect_cast_unranked_rank(%0: memref<*xf32>) {
   // expected-error at +1 {{expected second element of a memref descriptor to be an !llvm.ptr<i8>}}
-  llvm.mlir.cast %0 : memref<*xf32> to !llvm.struct<(i64, float)>
+  llvm.mlir.cast %0 : memref<*xf32> to !llvm.struct<(i64, f32)>
 }

diff  --git a/mlir/test/Dialect/LLVMIR/func.mlir b/mlir/test/Dialect/LLVMIR/func.mlir
index d9d7e17a9b50..ab32af24f3b5 100644
--- a/mlir/test/Dialect/LLVMIR/func.mlir
+++ b/mlir/test/Dialect/LLVMIR/func.mlir
@@ -40,8 +40,8 @@ module {
   // CHECK: llvm.func @roundtrip1()
   llvm.func @roundtrip1()
 
-  // CHECK: llvm.func @roundtrip2(i64, !llvm.float) -> !llvm.double
-  llvm.func @roundtrip2(i64, !llvm.float) -> !llvm.double
+  // CHECK: llvm.func @roundtrip2(i64, f32) -> f64
+  llvm.func @roundtrip2(i64, f32) -> f64
 
   // CHECK: llvm.func @roundtrip3(i32, i1)
   llvm.func @roundtrip3(%a: i32, %b: i1)

diff  --git a/mlir/test/Dialect/LLVMIR/global.mlir b/mlir/test/Dialect/LLVMIR/global.mlir
index 9341e774ae84..0371c1850d78 100644
--- a/mlir/test/Dialect/LLVMIR/global.mlir
+++ b/mlir/test/Dialect/LLVMIR/global.mlir
@@ -9,8 +9,8 @@ llvm.mlir.global constant @default_external_constant(42) : i64
 // CHECK: llvm.mlir.global internal @global(42 : i64) : i64
 llvm.mlir.global internal @global(42 : i64) : i64
 
-// CHECK: llvm.mlir.global internal constant @constant(3.700000e+01 : f64) : !llvm.float
-llvm.mlir.global internal constant @constant(37.0) : !llvm.float
+// CHECK: llvm.mlir.global internal constant @constant(3.700000e+01 : f64) : f32
+llvm.mlir.global internal constant @constant(37.0) : f32
 
 // CHECK: llvm.mlir.global internal constant @".string"("foobar")
 llvm.mlir.global internal constant @".string"("foobar") : !llvm.array<6 x i8>

diff  --git a/mlir/test/Dialect/LLVMIR/invalid.mlir b/mlir/test/Dialect/LLVMIR/invalid.mlir
index 87eddd67f030..b496237f140b 100644
--- a/mlir/test/Dialect/LLVMIR/invalid.mlir
+++ b/mlir/test/Dialect/LLVMIR/invalid.mlir
@@ -69,30 +69,30 @@ func @alloca_non_integer_alignment() {
 
 // -----
 
-func @gep_missing_input_result_type(%pos : i64, %base : !llvm.ptr<float>) {
+func @gep_missing_input_result_type(%pos : i64, %base : !llvm.ptr<f32>) {
   // expected-error at +1 {{2 operands present, but expected 0}}
   llvm.getelementptr %base[%pos] : () -> ()
 }
 
 // -----
 
-func @gep_missing_input_type(%pos : i64, %base : !llvm.ptr<float>) {
+func @gep_missing_input_type(%pos : i64, %base : !llvm.ptr<f32>) {
   // expected-error at +1 {{2 operands present, but expected 0}}
-  llvm.getelementptr %base[%pos] : () -> (!llvm.ptr<float>)
+  llvm.getelementptr %base[%pos] : () -> (!llvm.ptr<f32>)
 }
 
 // -----
 
-func @gep_missing_result_type(%pos : i64, %base : !llvm.ptr<float>) {
+func @gep_missing_result_type(%pos : i64, %base : !llvm.ptr<f32>) {
   // expected-error at +1 {{op requires one result}}
-  llvm.getelementptr %base[%pos] : (!llvm.ptr<float>, i64) -> ()
+  llvm.getelementptr %base[%pos] : (!llvm.ptr<f32>, i64) -> ()
 }
 
 // -----
 
-func @gep_non_function_type(%pos : i64, %base : !llvm.ptr<float>) {
+func @gep_non_function_type(%pos : i64, %base : !llvm.ptr<f32>) {
   // expected-error at +1 {{invalid kind of type specified}}
-  llvm.getelementptr %base[%pos] : !llvm.ptr<float>
+  llvm.getelementptr %base[%pos] : !llvm.ptr<f32>
 }
 
 // -----
@@ -104,23 +104,23 @@ func @load_non_llvm_type(%foo : memref<f32>) {
 
 // -----
 
-func @load_non_ptr_type(%foo : !llvm.float) {
+func @load_non_ptr_type(%foo : f32) {
   // expected-error at +1 {{expected LLVM pointer type}}
-  llvm.load %foo : !llvm.float
+  llvm.load %foo : f32
 }
 
 // -----
 
-func @store_non_llvm_type(%foo : memref<f32>, %bar : !llvm.float) {
+func @store_non_llvm_type(%foo : memref<f32>, %bar : f32) {
   // expected-error at +1 {{expected LLVM pointer type}}
   llvm.store %bar, %foo : memref<f32>
 }
 
 // -----
 
-func @store_non_ptr_type(%foo : !llvm.float, %bar : !llvm.float) {
+func @store_non_ptr_type(%foo : f32, %bar : f32) {
   // expected-error at +1 {{expected LLVM pointer type}}
-  llvm.store %bar, %foo : !llvm.float
+  llvm.store %bar, %foo : f32
 }
 
 // -----
@@ -317,23 +317,23 @@ func @extractvalue_wrong_nesting() {
 
 // -----
 
-func @invalid_vector_type_1(%arg0: !llvm.vec<4 x float>, %arg1: i32, %arg2: !llvm.float) {
+func @invalid_vector_type_1(%arg0: !llvm.vec<4 x f32>, %arg1: i32, %arg2: f32) {
   // expected-error at +1 {{expected LLVM IR dialect vector type for operand #1}}
-  %0 = llvm.extractelement %arg2[%arg1 : i32] : !llvm.float
+  %0 = llvm.extractelement %arg2[%arg1 : i32] : f32
 }
 
 // -----
 
-func @invalid_vector_type_2(%arg0: !llvm.vec<4 x float>, %arg1: i32, %arg2: !llvm.float) {
+func @invalid_vector_type_2(%arg0: !llvm.vec<4 x f32>, %arg1: i32, %arg2: f32) {
   // expected-error at +1 {{expected LLVM IR dialect vector type for operand #1}}
-  %0 = llvm.insertelement %arg2, %arg2[%arg1 : i32] : !llvm.float
+  %0 = llvm.insertelement %arg2, %arg2[%arg1 : i32] : f32
 }
 
 // -----
 
-func @invalid_vector_type_3(%arg0: !llvm.vec<4 x float>, %arg1: i32, %arg2: !llvm.float) {
+func @invalid_vector_type_3(%arg0: !llvm.vec<4 x f32>, %arg1: i32, %arg2: f32) {
   // expected-error at +1 {{expected LLVM IR dialect vector type for operand #1}}
-  %0 = llvm.shufflevector %arg2, %arg2 [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : !llvm.float, !llvm.float
+  %0 = llvm.shufflevector %arg2, %arg2 [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : f32, f32
 }
 
 // -----
@@ -366,113 +366,113 @@ func @nvvm_invalid_shfl_pred_3(%arg0 : i32, %arg1 : i32, %arg2 : i32, %arg3 : i3
 
 // -----
 
-func @nvvm_invalid_mma_0(%a0 : !llvm.half, %a1 : !llvm.vec<2 x half>,
-                         %b0 : !llvm.vec<2 x half>, %b1 : !llvm.vec<2 x half>,
-                         %c0 : !llvm.float, %c1 : !llvm.float, %c2 : !llvm.float, %c3 : !llvm.float,
-                         %c4 : !llvm.float, %c5 : !llvm.float, %c6 : !llvm.float, %c7 : !llvm.float) {
+func @nvvm_invalid_mma_0(%a0 : f16, %a1 : !llvm.vec<2 x f16>,
+                         %b0 : !llvm.vec<2 x f16>, %b1 : !llvm.vec<2 x f16>,
+                         %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+                         %c4 : f32, %c5 : f32, %c6 : f32, %c7 : f32) {
   // expected-error@+1 {{expected operands to be 4 <halfx2>s followed by either 4 <halfx2>s or 8 floats}}
-  %0 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="row", blayout="col"} : (!llvm.half, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float) -> !llvm.struct<(float, float, float, float, float, float, float, float)>
-  llvm.return %0 : !llvm.struct<(float, float, float, float, float, float, float, float)>
+  %0 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="row", blayout="col"} : (f16, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, f32, f32, f32, f32, f32, f32, f32, f32) -> !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)>
+  llvm.return %0 : !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)>
 }
 
 // -----
 
-func @nvvm_invalid_mma_1(%a0 : !llvm.vec<2 x half>, %a1 : !llvm.vec<2 x half>,
-                         %b0 : !llvm.vec<2 x half>, %b1 : !llvm.vec<2 x half>,
-                         %c0 : !llvm.float, %c1 : !llvm.float, %c2 : !llvm.float, %c3 : !llvm.float,
-                         %c4 : !llvm.float, %c5 : !llvm.float, %c6 : !llvm.float, %c7 : !llvm.float) {
+func @nvvm_invalid_mma_1(%a0 : !llvm.vec<2 x f16>, %a1 : !llvm.vec<2 x f16>,
+                         %b0 : !llvm.vec<2 x f16>, %b1 : !llvm.vec<2 x f16>,
+                         %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+                         %c4 : f32, %c5 : f32, %c6 : f32, %c7 : f32) {
   // expected-error@+1 {{expected result type to be a struct of either 4 <halfx2>s or 8 floats}}
-  %0 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="row", blayout="col"} : (!llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float) -> !llvm.struct<(float, float, float, float, float, float, float, half)>
-  llvm.return %0 : !llvm.struct<(float, float, float, float, float, float, float, half)>
+  %0 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="row", blayout="col"} : (!llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, f32, f32, f32, f32, f32, f32, f32, f32) -> !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f16)>
+  llvm.return %0 : !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f16)>
 }
 
 // -----
 
-func @nvvm_invalid_mma_2(%a0 : !llvm.vec<2 x half>, %a1 : !llvm.vec<2 x half>,
-                         %b0 : !llvm.vec<2 x half>, %b1 : !llvm.vec<2 x half>,
-                         %c0 : !llvm.float, %c1 : !llvm.float, %c2 : !llvm.float, %c3 : !llvm.float,
-                         %c4 : !llvm.float, %c5 : !llvm.float, %c6 : !llvm.float, %c7 : !llvm.float) {
+func @nvvm_invalid_mma_2(%a0 : !llvm.vec<2 x f16>, %a1 : !llvm.vec<2 x f16>,
+                         %b0 : !llvm.vec<2 x f16>, %b1 : !llvm.vec<2 x f16>,
+                         %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+                         %c4 : f32, %c5 : f32, %c6 : f32, %c7 : f32) {
   // expected-error@+1 {{alayout and blayout attributes must be set to either "row" or "col"}}
-  %0 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 : (!llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float) -> !llvm.struct<(float, float, float, float, float, float, float, float)>
-  llvm.return %0 : !llvm.struct<(float, float, float, float, float, float, float, float)>
+  %0 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 : (!llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, f32, f32, f32, f32, f32, f32, f32, f32) -> !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)>
+  llvm.return %0 : !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)>
 }
 
 // -----
 
-func @nvvm_invalid_mma_3(%a0 : !llvm.vec<2 x half>, %a1 : !llvm.vec<2 x half>,
-                         %b0 : !llvm.vec<2 x half>, %b1 : !llvm.vec<2 x half>,
-                         %c0 : !llvm.vec<2 x half>, %c1 : !llvm.vec<2 x half>,
-                         %c2 : !llvm.vec<2 x half>, %c3 : !llvm.vec<2 x half>) {
+func @nvvm_invalid_mma_3(%a0 : !llvm.vec<2 x f16>, %a1 : !llvm.vec<2 x f16>,
+                         %b0 : !llvm.vec<2 x f16>, %b1 : !llvm.vec<2 x f16>,
+                         %c0 : !llvm.vec<2 x f16>, %c1 : !llvm.vec<2 x f16>,
+                         %c2 : !llvm.vec<2 x f16>, %c3 : !llvm.vec<2 x f16>) {
   // expected-error@+1 {{unimplemented mma.sync variant}}
-  %0 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3 {alayout="row", blayout="col"} : (!llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>) -> !llvm.struct<(float, float, float, float, float, float, float, float)>
-  llvm.return %0 : !llvm.struct<(float, float, float, float, float, float, float, float)>
+  %0 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3 {alayout="row", blayout="col"} : (!llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>) -> !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)>
+  llvm.return %0 : !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)>
 }
 
 // -----
 
-func @nvvm_invalid_mma_4(%a0 : !llvm.vec<2 x half>, %a1 : !llvm.vec<2 x half>,
-                         %b0 : !llvm.vec<2 x half>, %b1 : !llvm.vec<2 x half>,
-                         %c0 : !llvm.float, %c1 : !llvm.float, %c2 : !llvm.float, %c3 : !llvm.float,
-                         %c4 : !llvm.float, %c5 : !llvm.float, %c6 : !llvm.float, %c7 : !llvm.float) {
+func @nvvm_invalid_mma_4(%a0 : !llvm.vec<2 x f16>, %a1 : !llvm.vec<2 x f16>,
+                         %b0 : !llvm.vec<2 x f16>, %b1 : !llvm.vec<2 x f16>,
+                         %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+                         %c4 : f32, %c5 : f32, %c6 : f32, %c7 : f32) {
   // expected-error@+1 {{unimplemented mma.sync variant}}
-  %0 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="row", blayout="col"} : (!llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float) -> !llvm.struct<(vec<2 x half>, vec<2 x half>, vec<2 x half>, vec<2 x half>)>
-  llvm.return %0 : !llvm.struct<(vec<2 x half>, vec<2 x half>, vec<2 x half>, vec<2 x half>)>
+  %0 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="row", blayout="col"} : (!llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, f32, f32, f32, f32, f32, f32, f32, f32) -> !llvm.struct<(vec<2 x f16>, vec<2 x f16>, vec<2 x f16>, vec<2 x f16>)>
+  llvm.return %0 : !llvm.struct<(vec<2 x f16>, vec<2 x f16>, vec<2 x f16>, vec<2 x f16>)>
 }
 
 // -----
 
-func @nvvm_invalid_mma_5(%a0 : !llvm.vec<2 x half>, %a1 : !llvm.vec<2 x half>,
-                         %b0 : !llvm.vec<2 x half>, %b1 : !llvm.vec<2 x half>,
-                         %c0 : !llvm.float, %c1 : !llvm.float, %c2 : !llvm.float, %c3 : !llvm.float,
-                         %c4 : !llvm.float, %c5 : !llvm.float, %c6 : !llvm.float, %c7 : !llvm.float) {
+func @nvvm_invalid_mma_5(%a0 : !llvm.vec<2 x f16>, %a1 : !llvm.vec<2 x f16>,
+                         %b0 : !llvm.vec<2 x f16>, %b1 : !llvm.vec<2 x f16>,
+                         %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+                         %c4 : f32, %c5 : f32, %c6 : f32, %c7 : f32) {
   // expected-error@+1 {{unimplemented mma.sync variant}}
-  %0 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="col", blayout="row"} : (!llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float) -> !llvm.struct<(float, float, float, float, float, float, float, float)>
-  llvm.return %0 : !llvm.struct<(float, float, float, float, float, float, float, float)>
+  %0 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="col", blayout="row"} : (!llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, f32, f32, f32, f32, f32, f32, f32, f32) -> !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)>
+  llvm.return %0 : !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)>
 }
 
 // -----
 
-func @nvvm_invalid_mma_6(%a0 : !llvm.vec<2 x half>, %a1 : !llvm.vec<2 x half>,
-                         %b0 : !llvm.vec<2 x half>, %b1 : !llvm.vec<2 x half>,
-                         %c0 : !llvm.float, %c1 : !llvm.float, %c2 : !llvm.float, %c3 : !llvm.float,
-                         %c4 : !llvm.float, %c5 : !llvm.float, %c6 : !llvm.float, %c7 : !llvm.float) {
+func @nvvm_invalid_mma_6(%a0 : !llvm.vec<2 x f16>, %a1 : !llvm.vec<2 x f16>,
+                         %b0 : !llvm.vec<2 x f16>, %b1 : !llvm.vec<2 x f16>,
+                         %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+                         %c4 : f32, %c5 : f32, %c6 : f32, %c7 : f32) {
   // expected-error@+1 {{invalid kind of type specified}}
-  %0 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="col", blayout="row"} : !llvm.struct<(float, float, float, float, float, float, float, float)>
-  llvm.return %0 : !llvm.struct<(float, float, float, float, float, float, float, float)>
+  %0 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="col", blayout="row"} : !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)>
+  llvm.return %0 : !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)>
 }
 
 // -----
 
-func @nvvm_invalid_mma_7(%a0 : !llvm.vec<2 x half>, %a1 : !llvm.vec<2 x half>,
-                         %b0 : !llvm.vec<2 x half>, %b1 : !llvm.vec<2 x half>,
-                         %c0 : !llvm.float, %c1 : !llvm.float, %c2 : !llvm.float, %c3 : !llvm.float,
-                         %c4 : !llvm.float, %c5 : !llvm.float, %c6 : !llvm.float, %c7 : !llvm.float) {
+func @nvvm_invalid_mma_7(%a0 : !llvm.vec<2 x f16>, %a1 : !llvm.vec<2 x f16>,
+                         %b0 : !llvm.vec<2 x f16>, %b1 : !llvm.vec<2 x f16>,
+                         %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+                         %c4 : f32, %c5 : f32, %c6 : f32, %c7 : f32) {
   // expected-error@+1 {{op requires one result}}
-  %0:2 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="col", blayout="row"} : (!llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float) -> (!llvm.struct<(float, float, float, float, float, float, float, float)>, i32)
-  llvm.return %0#0 : !llvm.struct<(float, float, float, float, float, float, float, float)>
+  %0:2 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="col", blayout="row"} : (!llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, f32, f32, f32, f32, f32, f32, f32, f32) -> (!llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)>, i32)
+  llvm.return %0#0 : !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)>
 }
 
 // -----
 
-func @atomicrmw_expected_ptr(%f32 : !llvm.float) {
+func @atomicrmw_expected_ptr(%f32 : f32) {
   // expected-error@+1 {{operand #0 must be LLVM pointer to floating point LLVM type or LLVM integer type}}
-  %0 = "llvm.atomicrmw"(%f32, %f32) {bin_op=11, ordering=1} : (!llvm.float, !llvm.float) -> !llvm.float
+  %0 = "llvm.atomicrmw"(%f32, %f32) {bin_op=11, ordering=1} : (f32, f32) -> f32
   llvm.return
 }
 
 // -----
 
-func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr<float>, %i32 : i32) {
+func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr<f32>, %i32 : i32) {
   // expected-error@+1 {{expected LLVM IR element type for operand #0 to match type for operand #1}}
-  %0 = "llvm.atomicrmw"(%f32_ptr, %i32) {bin_op=11, ordering=1} : (!llvm.ptr<float>, i32) -> !llvm.float
+  %0 = "llvm.atomicrmw"(%f32_ptr, %i32) {bin_op=11, ordering=1} : (!llvm.ptr<f32>, i32) -> f32
   llvm.return
 }
 
 // -----
 
-func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr<float>, %f32 : !llvm.float) {
+func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr<f32>, %f32 : f32) {
   // expected-error@+1 {{expected LLVM IR result type to match type for operand #1}}
-  %0 = "llvm.atomicrmw"(%f32_ptr, %f32) {bin_op=11, ordering=1} : (!llvm.ptr<float>, !llvm.float) -> i32
+  %0 = "llvm.atomicrmw"(%f32_ptr, %f32) {bin_op=11, ordering=1} : (!llvm.ptr<f32>, f32) -> i32
   llvm.return
 }
 
@@ -494,17 +494,17 @@ func @atomicrmw_unexpected_xchg_type(%i1_ptr : !llvm.ptr<i1>, %i1 : i1) {
 
 // -----
 
-func @atomicrmw_expected_int(%f32_ptr : !llvm.ptr<float>, %f32 : !llvm.float) {
+func @atomicrmw_expected_int(%f32_ptr : !llvm.ptr<f32>, %f32 : f32) {
   // expected-error@+1 {{expected LLVM IR integer type}}
-  %0 = llvm.atomicrmw max %f32_ptr, %f32 unordered : !llvm.float
+  %0 = llvm.atomicrmw max %f32_ptr, %f32 unordered : f32
   llvm.return
 }
 
 // -----
 
-func @cmpxchg_expected_ptr(%f32_ptr : !llvm.ptr<float>, %f32 : !llvm.float) {
+func @cmpxchg_expected_ptr(%f32_ptr : !llvm.ptr<f32>, %f32 : f32) {
   // expected-error@+1 {{op operand #0 must be LLVM pointer to LLVM integer type or LLVM pointer type}}
-  %0 = "llvm.cmpxchg"(%f32, %f32, %f32) {success_ordering=2,failure_ordering=2} : (!llvm.float, !llvm.float, !llvm.float) -> !llvm.struct<(float, i1)>
+  %0 = "llvm.cmpxchg"(%f32, %f32, %f32) {success_ordering=2,failure_ordering=2} : (f32, f32, f32) -> !llvm.struct<(f32, i1)>
   llvm.return
 }
 

diff --git a/mlir/test/Dialect/LLVMIR/nvvm.mlir b/mlir/test/Dialect/LLVMIR/nvvm.mlir
index 8bb626617b3d..ba0543e18cbc 100644
--- a/mlir/test/Dialect/LLVMIR/nvvm.mlir
+++ b/mlir/test/Dialect/LLVMIR/nvvm.mlir
@@ -36,21 +36,21 @@ func @llvm.nvvm.barrier0() {
 
 func @nvvm_shfl(
     %arg0 : i32, %arg1 : i32, %arg2 : i32,
-    %arg3 : i32, %arg4 : !llvm.float) -> i32 {
+    %arg3 : i32, %arg4 : f32) -> i32 {
   // CHECK: nvvm.shfl.sync.bfly %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : i32
   %0 = nvvm.shfl.sync.bfly %arg0, %arg3, %arg1, %arg2 : i32
-  // CHECK: nvvm.shfl.sync.bfly %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.float
-  %1 = nvvm.shfl.sync.bfly %arg0, %arg4, %arg1, %arg2 : !llvm.float
+  // CHECK: nvvm.shfl.sync.bfly %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : f32
+  %1 = nvvm.shfl.sync.bfly %arg0, %arg4, %arg1, %arg2 : f32
   llvm.return %0 : i32
 }
 
 func @nvvm_shfl_pred(
     %arg0 : i32, %arg1 : i32, %arg2 : i32,
-    %arg3 : i32, %arg4 : !llvm.float) -> !llvm.struct<(i32, i1)> {
+    %arg3 : i32, %arg4 : f32) -> !llvm.struct<(i32, i1)> {
   // CHECK: nvvm.shfl.sync.bfly %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.struct<(i32, i1)>
   %0 = nvvm.shfl.sync.bfly %arg0, %arg3, %arg1, %arg2 {return_value_and_is_valid} : !llvm.struct<(i32, i1)>
-  // CHECK: nvvm.shfl.sync.bfly %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.struct<(float, i1)>
-  %1 = nvvm.shfl.sync.bfly %arg0, %arg4, %arg1, %arg2 {return_value_and_is_valid} : !llvm.struct<(float, i1)>
+  // CHECK: nvvm.shfl.sync.bfly %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.struct<(f32, i1)>
+  %1 = nvvm.shfl.sync.bfly %arg0, %arg4, %arg1, %arg2 {return_value_and_is_valid} : !llvm.struct<(f32, i1)>
   llvm.return %0 : !llvm.struct<(i32, i1)>
 }
 
@@ -60,11 +60,11 @@ func @nvvm_vote(%arg0 : i32, %arg1 : i1) -> i32 {
   llvm.return %0 : i32
 }
 
-func @nvvm_mma(%a0 : !llvm.vec<2 x half>, %a1 : !llvm.vec<2 x half>,
-               %b0 : !llvm.vec<2 x half>, %b1 : !llvm.vec<2 x half>,
-               %c0 : !llvm.float, %c1 : !llvm.float, %c2 : !llvm.float, %c3 : !llvm.float,
-               %c4 : !llvm.float, %c5 : !llvm.float, %c6 : !llvm.float, %c7 : !llvm.float) {
-  // CHECK: nvvm.mma.sync {{.*}} {alayout = "row", blayout = "col"} : (!llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float) -> !llvm.struct<(float, float, float, float, float, float, float, float)>
-  %0 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="row", blayout="col"} : (!llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float) -> !llvm.struct<(float, float, float, float, float, float, float, float)>
-  llvm.return %0 : !llvm.struct<(float, float, float, float, float, float, float, float)>
+func @nvvm_mma(%a0 : !llvm.vec<2 x f16>, %a1 : !llvm.vec<2 x f16>,
+               %b0 : !llvm.vec<2 x f16>, %b1 : !llvm.vec<2 x f16>,
+               %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+               %c4 : f32, %c5 : f32, %c6 : f32, %c7 : f32) {
+  // CHECK: nvvm.mma.sync {{.*}} {alayout = "row", blayout = "col"} : (!llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, f32, f32, f32, f32, f32, f32, f32, f32) -> !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)>
+  %0 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="row", blayout="col"} : (!llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, f32, f32, f32, f32, f32, f32, f32, f32) -> !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)>
+  llvm.return %0 : !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)>
 }

diff --git a/mlir/test/Dialect/LLVMIR/rocdl.mlir b/mlir/test/Dialect/LLVMIR/rocdl.mlir
index caba012286a8..c5314ab4d128 100644
--- a/mlir/test/Dialect/LLVMIR/rocdl.mlir
+++ b/mlir/test/Dialect/LLVMIR/rocdl.mlir
@@ -35,62 +35,62 @@ func @rocdl.barrier() {
   llvm.return
 }
 
-func @rocdl.xdlops(%arg0 : !llvm.float, %arg1 : !llvm.float,
-                   %arg2 : !llvm.vec<32 x float>, %arg3 : i32,
-                   %arg4 : !llvm.vec<16 x float>, %arg5 : !llvm.vec<4 x float>,
-                   %arg6 : !llvm.vec<4 x half>, %arg7 : !llvm.vec<32 x i32>,
+func @rocdl.xdlops(%arg0 : f32, %arg1 : f32,
+                   %arg2 : !llvm.vec<32 x f32>, %arg3 : i32,
+                   %arg4 : !llvm.vec<16 x f32>, %arg5 : !llvm.vec<4 x f32>,
+                   %arg6 : !llvm.vec<4 x f16>, %arg7 : !llvm.vec<32 x i32>,
                    %arg8 : !llvm.vec<16 x i32>, %arg9 : !llvm.vec<4 x i32>,
-                   %arg10 : !llvm.vec<2 x i16>) -> !llvm.vec<32 x float> {
+                   %arg10 : !llvm.vec<2 x i16>) -> !llvm.vec<32 x f32> {
   // CHECK-LABEL: rocdl.xdlops
-  // CHECK: rocdl.mfma.f32.32x32x1f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<32 x float>, i32, i32, i32) -> !llvm.vec<32 x float>
+  // CHECK: rocdl.mfma.f32.32x32x1f32 {{.*}} : (f32, f32, !llvm.vec<32 x f32>, i32, i32, i32) -> !llvm.vec<32 x f32>
   %r0 = rocdl.mfma.f32.32x32x1f32 %arg0, %arg1, %arg2, %arg3, %arg3, %arg3 :
-                            (!llvm.float, !llvm.float, !llvm.vec<32 x float>,
-                            i32, i32, i32) -> !llvm.vec<32 x float>
+                            (f32, f32, !llvm.vec<32 x f32>,
+                            i32, i32, i32) -> !llvm.vec<32 x f32>
 
-  // CHECK: rocdl.mfma.f32.16x16x1f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<16 x float>, i32, i32, i32) -> !llvm.vec<16 x float>
+  // CHECK: rocdl.mfma.f32.16x16x1f32 {{.*}} : (f32, f32, !llvm.vec<16 x f32>, i32, i32, i32) -> !llvm.vec<16 x f32>
   %r1 = rocdl.mfma.f32.16x16x1f32 %arg0, %arg1, %arg4, %arg3, %arg3, %arg3 :
-                            (!llvm.float, !llvm.float, !llvm.vec<16 x float>,
-                            i32, i32, i32) -> !llvm.vec<16 x float>
+                            (f32, f32, !llvm.vec<16 x f32>,
+                            i32, i32, i32) -> !llvm.vec<16 x f32>
 
-  // CHECK: rocdl.mfma.f32.16x16x4f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<4 x float>, i32, i32, i32) -> !llvm.vec<4 x float>
+  // CHECK: rocdl.mfma.f32.16x16x4f32 {{.*}} : (f32, f32, !llvm.vec<4 x f32>, i32, i32, i32) -> !llvm.vec<4 x f32>
   %r2 = rocdl.mfma.f32.16x16x4f32 %arg0, %arg1, %arg5, %arg3, %arg3, %arg3 :
-                            (!llvm.float, !llvm.float, !llvm.vec<4 x float>,
-                            i32, i32, i32) -> !llvm.vec<4 x float>
+                            (f32, f32, !llvm.vec<4 x f32>,
+                            i32, i32, i32) -> !llvm.vec<4 x f32>
 
-  // CHECK: rocdl.mfma.f32.4x4x1f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<4 x float>, i32, i32, i32) -> !llvm.vec<4 x float>
+  // CHECK: rocdl.mfma.f32.4x4x1f32 {{.*}} : (f32, f32, !llvm.vec<4 x f32>, i32, i32, i32) -> !llvm.vec<4 x f32>
   %r3 = rocdl.mfma.f32.4x4x1f32 %arg0, %arg1, %arg5, %arg3, %arg3, %arg3 :
-                            (!llvm.float, !llvm.float, !llvm.vec<4 x float>,
-                            i32, i32, i32) -> !llvm.vec<4 x float>
+                            (f32, f32, !llvm.vec<4 x f32>,
+                            i32, i32, i32) -> !llvm.vec<4 x f32>
 
-  // CHECK: rocdl.mfma.f32.32x32x2f32 {{.*}} : (!llvm.float, !llvm.float, !llvm.vec<16 x float>, i32, i32, i32) -> !llvm.vec<16 x float>
+  // CHECK: rocdl.mfma.f32.32x32x2f32 {{.*}} : (f32, f32, !llvm.vec<16 x f32>, i32, i32, i32) -> !llvm.vec<16 x f32>
   %r4= rocdl.mfma.f32.32x32x2f32 %arg0, %arg1, %arg4, %arg3, %arg3, %arg3 :
-                            (!llvm.float, !llvm.float, !llvm.vec<16 x float>,
-                            i32, i32, i32) -> !llvm.vec<16 x float>
+                            (f32, f32, !llvm.vec<16 x f32>,
+                            i32, i32, i32) -> !llvm.vec<16 x f32>
 
-  // CHECK: rocdl.mfma.f32.32x32x4f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<32 x float>, i32, i32, i32) -> !llvm.vec<32 x float>
+  // CHECK: rocdl.mfma.f32.32x32x4f16 {{.*}} : (!llvm.vec<4 x f16>, !llvm.vec<4 x f16>, !llvm.vec<32 x f32>, i32, i32, i32) -> !llvm.vec<32 x f32>
   %r5 = rocdl.mfma.f32.32x32x4f16 %arg6, %arg6, %arg2, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<32 x float>,
-                            i32, i32, i32) -> !llvm.vec<32 x float>
+                            (!llvm.vec<4 x f16>, !llvm.vec<4 x f16>, !llvm.vec<32 x f32>,
+                            i32, i32, i32) -> !llvm.vec<32 x f32>
 
-  // CHECK: rocdl.mfma.f32.16x16x4f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>, i32, i32, i32) -> !llvm.vec<16 x float>
+  // CHECK: rocdl.mfma.f32.16x16x4f16 {{.*}} : (!llvm.vec<4 x f16>, !llvm.vec<4 x f16>, !llvm.vec<16 x f32>, i32, i32, i32) -> !llvm.vec<16 x f32>
   %r6 = rocdl.mfma.f32.16x16x4f16 %arg6, %arg6, %arg4, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>,
-                            i32, i32, i32) -> !llvm.vec<16 x float>
+                            (!llvm.vec<4 x f16>, !llvm.vec<4 x f16>, !llvm.vec<16 x f32>,
+                            i32, i32, i32) -> !llvm.vec<16 x f32>
 
-  // CHECK: rocdl.mfma.f32.4x4x4f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>, i32, i32, i32) -> !llvm.vec<4 x float>
+  // CHECK: rocdl.mfma.f32.4x4x4f16 {{.*}} : (!llvm.vec<4 x f16>, !llvm.vec<4 x f16>, !llvm.vec<4 x f32>, i32, i32, i32) -> !llvm.vec<4 x f32>
   %r7 = rocdl.mfma.f32.4x4x4f16 %arg6, %arg6, %arg5, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>,
-                            i32, i32, i32) -> !llvm.vec<4 x float>
+                            (!llvm.vec<4 x f16>, !llvm.vec<4 x f16>, !llvm.vec<4 x f32>,
+                            i32, i32, i32) -> !llvm.vec<4 x f32>
 
-  // CHECK: rocdl.mfma.f32.32x32x8f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>, i32, i32, i32) -> !llvm.vec<16 x float>
+  // CHECK: rocdl.mfma.f32.32x32x8f16 {{.*}} : (!llvm.vec<4 x f16>, !llvm.vec<4 x f16>, !llvm.vec<16 x f32>, i32, i32, i32) -> !llvm.vec<16 x f32>
   %r8 = rocdl.mfma.f32.32x32x8f16 %arg6, %arg6, %arg4, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>,
-                            i32, i32, i32) -> !llvm.vec<16 x float>
+                            (!llvm.vec<4 x f16>, !llvm.vec<4 x f16>, !llvm.vec<16 x f32>,
+                            i32, i32, i32) -> !llvm.vec<16 x f32>
 
-  // CHECK: rocdl.mfma.f32.16x16x16f16 {{.*}} : (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>, i32, i32, i32) -> !llvm.vec<4 x float>
+  // CHECK: rocdl.mfma.f32.16x16x16f16 {{.*}} : (!llvm.vec<4 x f16>, !llvm.vec<4 x f16>, !llvm.vec<4 x f32>, i32, i32, i32) -> !llvm.vec<4 x f32>
   %r9 = rocdl.mfma.f32.16x16x16f16 %arg6, %arg6, %arg5, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>,
-                            i32, i32, i32) -> !llvm.vec<4 x float>
+                            (!llvm.vec<4 x f16>, !llvm.vec<4 x f16>, !llvm.vec<4 x f32>,
+                            i32, i32, i32) -> !llvm.vec<4 x f32>
 
   // CHECK: rocdl.mfma.i32.32x32x4i8 {{.*}} : (i32, i32, !llvm.vec<32 x i32>, i32, i32, i32) -> !llvm.vec<32 x i32>
   %r10 = rocdl.mfma.i32.32x32x4i8 %arg3, %arg3, %arg7, %arg3, %arg3, %arg3 :
@@ -117,52 +117,52 @@ func @rocdl.xdlops(%arg0 : !llvm.float, %arg1 : !llvm.float,
                             (i32, i32, !llvm.vec<4 x i32>,
                             i32, i32, i32) -> !llvm.vec<4 x i32>
 
-  // CHECK: rocdl.mfma.f32.32x32x2bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<32 x float>, i32, i32, i32) -> !llvm.vec<32 x float>
+  // CHECK: rocdl.mfma.f32.32x32x2bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<32 x f32>, i32, i32, i32) -> !llvm.vec<32 x f32>
   %r15 = rocdl.mfma.f32.32x32x2bf16 %arg10, %arg10, %arg2, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<32 x float>,
-                            i32, i32, i32) -> !llvm.vec<32 x float>
+                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<32 x f32>,
+                            i32, i32, i32) -> !llvm.vec<32 x f32>
 
-  // CHECK: rocdl.mfma.f32.16x16x2bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>, i32, i32, i32) -> !llvm.vec<16 x float>
+  // CHECK: rocdl.mfma.f32.16x16x2bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x f32>, i32, i32, i32) -> !llvm.vec<16 x f32>
   %r16 = rocdl.mfma.f32.16x16x2bf16 %arg10, %arg10, %arg4, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>,
-                            i32, i32, i32) -> !llvm.vec<16 x float>
+                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x f32>,
+                            i32, i32, i32) -> !llvm.vec<16 x f32>
 
-  // CHECK: rocdl.mfma.f32.4x4x2bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>, i32, i32, i32) -> !llvm.vec<4 x float>
+  // CHECK: rocdl.mfma.f32.4x4x2bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x f32>, i32, i32, i32) -> !llvm.vec<4 x f32>
   %r17 = rocdl.mfma.f32.4x4x2bf16 %arg10, %arg10, %arg5, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>,
-                            i32, i32, i32) -> !llvm.vec<4 x float>
+                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x f32>,
+                            i32, i32, i32) -> !llvm.vec<4 x f32>
 
-  // CHECK: rocdl.mfma.f32.32x32x4bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>, i32, i32, i32) -> !llvm.vec<16 x float>
+  // CHECK: rocdl.mfma.f32.32x32x4bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x f32>, i32, i32, i32) -> !llvm.vec<16 x f32>
   %r18 = rocdl.mfma.f32.32x32x4bf16 %arg10, %arg10, %arg4, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>,
-                            i32, i32, i32) -> !llvm.vec<16 x float>
+                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x f32>,
+                            i32, i32, i32) -> !llvm.vec<16 x f32>
 
-  // CHECK: rocdl.mfma.f32.16x16x8bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>, i32, i32, i32) -> !llvm.vec<4 x float>
+  // CHECK: rocdl.mfma.f32.16x16x8bf16 {{.*}} : (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x f32>, i32, i32, i32) -> !llvm.vec<4 x f32>
   %r19 = rocdl.mfma.f32.16x16x8bf16 %arg10, %arg10, %arg5, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>,
-                            i32, i32, i32) -> !llvm.vec<4 x float>
+                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x f32>,
+                            i32, i32, i32) -> !llvm.vec<4 x f32>
 
-  llvm.return %r0 : !llvm.vec<32 x float>
+  llvm.return %r0 : !llvm.vec<32 x f32>
 }
 
 llvm.func @rocdl.mubuf(%rsrc : !llvm.vec<4 x i32>, %vindex : i32,
                        %offset : i32, %glc : i1,
-                       %slc : i1, %vdata1 : !llvm.vec<1 x float>,
-                       %vdata2 : !llvm.vec<2 x float>, %vdata4 : !llvm.vec<4 x float>) {
+                       %slc : i1, %vdata1 : !llvm.vec<1 x f32>,
+                       %vdata2 : !llvm.vec<2 x f32>, %vdata4 : !llvm.vec<4 x f32>) {
   // CHECK-LABEL: rocdl.mubuf
-  // CHECK: %{{.*}} = rocdl.buffer.load %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} : !llvm.vec<1 x float>
-  %r1 = rocdl.buffer.load %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<1 x float>
-  // CHECK: %{{.*}} = rocdl.buffer.load %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} : !llvm.vec<2 x float>
-  %r2 = rocdl.buffer.load %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<2 x float>
-  // CHECK: %{{.*}} = rocdl.buffer.load %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} : !llvm.vec<4 x float>
-  %r4 = rocdl.buffer.load %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<4 x float>
-
-  // CHECK: rocdl.buffer.store %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} : !llvm.vec<1 x float>
-  rocdl.buffer.store %vdata1, %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<1 x float>
-  // CHECK: rocdl.buffer.store %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} : !llvm.vec<2 x float>
-  rocdl.buffer.store %vdata2, %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<2 x float>
-  // CHECK: rocdl.buffer.store %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} : !llvm.vec<4 x float>
-  rocdl.buffer.store %vdata4, %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<4 x float>
+  // CHECK: %{{.*}} = rocdl.buffer.load %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} : !llvm.vec<1 x f32>
+  %r1 = rocdl.buffer.load %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<1 x f32>
+  // CHECK: %{{.*}} = rocdl.buffer.load %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} : !llvm.vec<2 x f32>
+  %r2 = rocdl.buffer.load %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<2 x f32>
+  // CHECK: %{{.*}} = rocdl.buffer.load %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} : !llvm.vec<4 x f32>
+  %r4 = rocdl.buffer.load %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<4 x f32>
+
+  // CHECK: rocdl.buffer.store %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} : !llvm.vec<1 x f32>
+  rocdl.buffer.store %vdata1, %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<1 x f32>
+  // CHECK: rocdl.buffer.store %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} : !llvm.vec<2 x f32>
+  rocdl.buffer.store %vdata2, %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<2 x f32>
+  // CHECK: rocdl.buffer.store %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} %{{.*}} : !llvm.vec<4 x f32>
+  rocdl.buffer.store %vdata4, %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<4 x f32>
 
   llvm.return
 }

diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
index 233cc385c253..ff970178ac9f 100644
--- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir
+++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
@@ -1,8 +1,8 @@
 // RUN: mlir-opt %s | mlir-opt | FileCheck %s
 
 // CHECK-LABEL: func @ops
-// CHECK-SAME: (%[[I32:.*]]: i32, %[[FLOAT:.*]]: !llvm.float, %[[I8PTR1:.*]]: !llvm.ptr<i8>, %[[I8PTR2:.*]]: !llvm.ptr<i8>, %[[BOOL:.*]]: i1)
-func @ops(%arg0: i32, %arg1: !llvm.float,
+// CHECK-SAME: (%[[I32:.*]]: i32, %[[FLOAT:.*]]: f32, %[[I8PTR1:.*]]: !llvm.ptr<i8>, %[[I8PTR2:.*]]: !llvm.ptr<i8>, %[[BOOL:.*]]: i1)
+func @ops(%arg0: i32, %arg1: f32,
           %arg2: !llvm.ptr<i8>, %arg3: !llvm.ptr<i8>,
           %arg4: i1) {
 // Integer arithmetic binary operations.
@@ -26,42 +26,42 @@ func @ops(%arg0: i32, %arg1: !llvm.float,
 
 // Floating point binary operations.
 //
-// CHECK: {{.*}} = llvm.fadd %[[FLOAT]], %[[FLOAT]] : !llvm.float
-// CHECK: {{.*}} = llvm.fsub %[[FLOAT]], %[[FLOAT]] : !llvm.float
-// CHECK: {{.*}} = llvm.fmul %[[FLOAT]], %[[FLOAT]] : !llvm.float
-// CHECK: {{.*}} = llvm.fdiv %[[FLOAT]], %[[FLOAT]] : !llvm.float
-// CHECK: {{.*}} = llvm.frem %[[FLOAT]], %[[FLOAT]] : !llvm.float
-  %8 = llvm.fadd %arg1, %arg1 : !llvm.float
-  %9 = llvm.fsub %arg1, %arg1 : !llvm.float
-  %10 = llvm.fmul %arg1, %arg1 : !llvm.float
-  %11 = llvm.fdiv %arg1, %arg1 : !llvm.float
-  %12 = llvm.frem %arg1, %arg1 : !llvm.float
+// CHECK: {{.*}} = llvm.fadd %[[FLOAT]], %[[FLOAT]] : f32
+// CHECK: {{.*}} = llvm.fsub %[[FLOAT]], %[[FLOAT]] : f32
+// CHECK: {{.*}} = llvm.fmul %[[FLOAT]], %[[FLOAT]] : f32
+// CHECK: {{.*}} = llvm.fdiv %[[FLOAT]], %[[FLOAT]] : f32
+// CHECK: {{.*}} = llvm.frem %[[FLOAT]], %[[FLOAT]] : f32
+  %8 = llvm.fadd %arg1, %arg1 : f32
+  %9 = llvm.fsub %arg1, %arg1 : f32
+  %10 = llvm.fmul %arg1, %arg1 : f32
+  %11 = llvm.fdiv %arg1, %arg1 : f32
+  %12 = llvm.frem %arg1, %arg1 : f32
 
 // Memory-related operations.
 //
-// CHECK-NEXT:  %[[ALLOCA:.*]] = llvm.alloca %[[I32]] x !llvm.double : (i32) -> !llvm.ptr<double>
-// CHECK-NEXT:  %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][%[[I32]], %[[I32]]] : (!llvm.ptr<double>, i32, i32) -> !llvm.ptr<double>
-// CHECK-NEXT:  %[[VALUE:.*]] = llvm.load %[[GEP]] : !llvm.ptr<double>
-// CHECK-NEXT:  llvm.store %[[VALUE]], %[[ALLOCA]] : !llvm.ptr<double>
-// CHECK-NEXT:  %{{.*}} = llvm.bitcast %[[ALLOCA]] : !llvm.ptr<double> to !llvm.ptr<i64>
-  %13 = llvm.alloca %arg0 x !llvm.double : (i32) -> !llvm.ptr<double>
-  %14 = llvm.getelementptr %13[%arg0, %arg0] : (!llvm.ptr<double>, i32, i32) -> !llvm.ptr<double>
-  %15 = llvm.load %14 : !llvm.ptr<double>
-  llvm.store %15, %13 : !llvm.ptr<double>
-  %16 = llvm.bitcast %13 : !llvm.ptr<double> to !llvm.ptr<i64>
+// CHECK-NEXT:  %[[ALLOCA:.*]] = llvm.alloca %[[I32]] x f64 : (i32) -> !llvm.ptr<f64>
+// CHECK-NEXT:  %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][%[[I32]], %[[I32]]] : (!llvm.ptr<f64>, i32, i32) -> !llvm.ptr<f64>
+// CHECK-NEXT:  %[[VALUE:.*]] = llvm.load %[[GEP]] : !llvm.ptr<f64>
+// CHECK-NEXT:  llvm.store %[[VALUE]], %[[ALLOCA]] : !llvm.ptr<f64>
+// CHECK-NEXT:  %{{.*}} = llvm.bitcast %[[ALLOCA]] : !llvm.ptr<f64> to !llvm.ptr<i64>
+  %13 = llvm.alloca %arg0 x f64 : (i32) -> !llvm.ptr<f64>
+  %14 = llvm.getelementptr %13[%arg0, %arg0] : (!llvm.ptr<f64>, i32, i32) -> !llvm.ptr<f64>
+  %15 = llvm.load %14 : !llvm.ptr<f64>
+  llvm.store %15, %13 : !llvm.ptr<f64>
+  %16 = llvm.bitcast %13 : !llvm.ptr<f64> to !llvm.ptr<i64>
 
 // Function call-related operations.
 //
-// CHECK: %[[STRUCT:.*]] = llvm.call @foo(%[[I32]]) : (i32) -> !llvm.struct<(i32, double, i32)>
-// CHECK: %[[VALUE:.*]] = llvm.extractvalue %[[STRUCT]][0] : !llvm.struct<(i32, double, i32)>
-// CHECK: %[[NEW_STRUCT:.*]] = llvm.insertvalue %[[VALUE]], %[[STRUCT]][2] : !llvm.struct<(i32, double, i32)>
-// CHECK: %[[FUNC:.*]] = llvm.mlir.addressof @foo : !llvm.ptr<func<struct<(i32, double, i32)> (i32)>>
-// CHECK: %{{.*}} = llvm.call %[[FUNC]](%[[I32]]) : (i32) -> !llvm.struct<(i32, double, i32)>
-  %17 = llvm.call @foo(%arg0) : (i32) -> !llvm.struct<(i32, double, i32)>
-  %18 = llvm.extractvalue %17[0] : !llvm.struct<(i32, double, i32)>
-  %19 = llvm.insertvalue %18, %17[2] : !llvm.struct<(i32, double, i32)>
-  %20 = llvm.mlir.addressof @foo : !llvm.ptr<func<struct<(i32, double, i32)> (i32)>>
-  %21 = llvm.call %20(%arg0) : (i32) -> !llvm.struct<(i32, double, i32)>
+// CHECK: %[[STRUCT:.*]] = llvm.call @foo(%[[I32]]) : (i32) -> !llvm.struct<(i32, f64, i32)>
+// CHECK: %[[VALUE:.*]] = llvm.extractvalue %[[STRUCT]][0] : !llvm.struct<(i32, f64, i32)>
+// CHECK: %[[NEW_STRUCT:.*]] = llvm.insertvalue %[[VALUE]], %[[STRUCT]][2] : !llvm.struct<(i32, f64, i32)>
+// CHECK: %[[FUNC:.*]] = llvm.mlir.addressof @foo : !llvm.ptr<func<struct<(i32, f64, i32)> (i32)>>
+// CHECK: %{{.*}} = llvm.call %[[FUNC]](%[[I32]]) : (i32) -> !llvm.struct<(i32, f64, i32)>
+  %17 = llvm.call @foo(%arg0) : (i32) -> !llvm.struct<(i32, f64, i32)>
+  %18 = llvm.extractvalue %17[0] : !llvm.struct<(i32, f64, i32)>
+  %19 = llvm.insertvalue %18, %17[2] : !llvm.struct<(i32, f64, i32)>
+  %20 = llvm.mlir.addressof @foo : !llvm.ptr<func<struct<(i32, f64, i32)> (i32)>>
+  %21 = llvm.call %20(%arg0) : (i32) -> !llvm.struct<(i32, f64, i32)>
 
 
 // Terminator operations and their successors.
@@ -76,9 +76,9 @@ func @ops(%arg0: i32, %arg1: !llvm.float,
 
 // CHECK: ^[[BB2]]
 ^bb2:
-// CHECK: %{{.*}} = llvm.mlir.undef : !llvm.struct<(i32, double, i32)>
+// CHECK: %{{.*}} = llvm.mlir.undef : !llvm.struct<(i32, f64, i32)>
 // CHECK: %{{.*}} = llvm.mlir.constant(42 : i64) : i47
-  %22 = llvm.mlir.undef : !llvm.struct<(i32, double, i32)>
+  %22 = llvm.mlir.undef : !llvm.struct<(i32, f64, i32)>
   %23 = llvm.mlir.constant(42) : i47
   // CHECK:      llvm.switch %0, ^[[BB3]] [
   // CHECK-NEXT:   1: ^[[BB4:.*]],
@@ -128,19 +128,19 @@ func @ops(%arg0: i32, %arg1: !llvm.float,
 
 // Extended and Quad floating point
 //
-// CHECK: %{{.*}} = llvm.fpext %[[FLOAT]] : !llvm.float to !llvm.x86_fp80
-// CHECK: %{{.*}} = llvm.fpext %[[FLOAT]] : !llvm.float to !llvm.fp128
-  %27 = llvm.fpext %arg1 : !llvm.float to !llvm.x86_fp80
-  %28 = llvm.fpext %arg1 : !llvm.float to !llvm.fp128
+// CHECK: %{{.*}} = llvm.fpext %[[FLOAT]] : f32 to !llvm.x86_fp80
+// CHECK: %{{.*}} = llvm.fpext %[[FLOAT]] : f32 to !llvm.fp128
+  %27 = llvm.fpext %arg1 : f32 to !llvm.x86_fp80
+  %28 = llvm.fpext %arg1 : f32 to !llvm.fp128
 
-// CHECK: %{{.*}} = llvm.fneg %[[FLOAT]] : !llvm.float
-  %29 = llvm.fneg %arg1 : !llvm.float
+// CHECK: %{{.*}} = llvm.fneg %[[FLOAT]] : f32
+  %29 = llvm.fneg %arg1 : f32
 
-// CHECK: "llvm.intr.sin"(%[[FLOAT]]) : (!llvm.float) -> !llvm.float
-  %30 = "llvm.intr.sin"(%arg1) : (!llvm.float) -> !llvm.float
+// CHECK: "llvm.intr.sin"(%[[FLOAT]]) : (f32) -> f32
+  %30 = "llvm.intr.sin"(%arg1) : (f32) -> f32
 
-// CHECK: "llvm.intr.pow"(%[[FLOAT]], %[[FLOAT]]) : (!llvm.float, !llvm.float) -> !llvm.float
-  %31 = "llvm.intr.pow"(%arg1, %arg1) : (!llvm.float, !llvm.float) -> !llvm.float
+// CHECK: "llvm.intr.pow"(%[[FLOAT]], %[[FLOAT]]) : (f32, f32) -> f32
+  %31 = "llvm.intr.pow"(%arg1, %arg1) : (f32, f32) -> f32
 
 // CHECK: "llvm.intr.bitreverse"(%{{.*}}) : (i32) -> i32
   %32 = "llvm.intr.bitreverse"(%arg0) : (i32) -> i32
@@ -164,62 +164,62 @@ func @ops(%arg0: i32, %arg1: !llvm.float,
 }
 
 // An larger self-contained function.
-// CHECK-LABEL: llvm.func @foo(%{{.*}}: i32) -> !llvm.struct<(i32, double, i32)> {
-llvm.func @foo(%arg0: i32) -> !llvm.struct<(i32, double, i32)> {
+// CHECK-LABEL: llvm.func @foo(%{{.*}}: i32) -> !llvm.struct<(i32, f64, i32)> {
+llvm.func @foo(%arg0: i32) -> !llvm.struct<(i32, f64, i32)> {
 // CHECK:  %[[V0:.*]] = llvm.mlir.constant(3 : i64) : i32
 // CHECK:  %[[V1:.*]] = llvm.mlir.constant(3 : i64) : i32
-// CHECK:  %[[V2:.*]] = llvm.mlir.constant(4.200000e+01 : f64) : !llvm.double
-// CHECK:  %[[V3:.*]] = llvm.mlir.constant(4.200000e+01 : f64) : !llvm.double
+// CHECK:  %[[V2:.*]] = llvm.mlir.constant(4.200000e+01 : f64) : f64
+// CHECK:  %[[V3:.*]] = llvm.mlir.constant(4.200000e+01 : f64) : f64
 // CHECK:  %[[V4:.*]] = llvm.add %[[V0]], %[[V1]] : i32
 // CHECK:  %[[V5:.*]] = llvm.mul %[[V4]], %[[V1]] : i32
-// CHECK:  %[[V6:.*]] = llvm.fadd %[[V2]], %[[V3]] : !llvm.double
-// CHECK:  %[[V7:.*]] = llvm.fsub %[[V3]], %[[V6]] : !llvm.double
+// CHECK:  %[[V6:.*]] = llvm.fadd %[[V2]], %[[V3]] : f64
+// CHECK:  %[[V7:.*]] = llvm.fsub %[[V3]], %[[V6]] : f64
 // CHECK:  %[[V8:.*]] = llvm.mlir.constant(1 : i64) : i1
 // CHECK:  llvm.cond_br %[[V8]], ^[[BB1:.*]](%[[V4]] : i32), ^[[BB2:.*]](%[[V4]] : i32)
   %0 = llvm.mlir.constant(3) : i32
   %1 = llvm.mlir.constant(3) : i32
-  %2 = llvm.mlir.constant(4.200000e+01) : !llvm.double
-  %3 = llvm.mlir.constant(4.200000e+01) : !llvm.double
+  %2 = llvm.mlir.constant(4.200000e+01) : f64
+  %3 = llvm.mlir.constant(4.200000e+01) : f64
   %4 = llvm.add %0, %1 : i32
   %5 = llvm.mul %4, %1 : i32
-  %6 = llvm.fadd %2, %3 : !llvm.double
-  %7 = llvm.fsub %3, %6 : !llvm.double
+  %6 = llvm.fadd %2, %3 : f64
+  %7 = llvm.fsub %3, %6 : f64
   %8 = llvm.mlir.constant(1) : i1
   llvm.cond_br %8, ^bb1(%4 : i32), ^bb2(%4 : i32)
 
 // CHECK:^[[BB1]](%[[V9:.*]]: i32):
-// CHECK:  %[[V10:.*]] = llvm.call @foo(%[[V9]]) : (i32) -> !llvm.struct<(i32, double, i32)>
-// CHECK:  %[[V11:.*]] = llvm.extractvalue %[[V10]][0] : !llvm.struct<(i32, double, i32)>
-// CHECK:  %[[V12:.*]] = llvm.extractvalue %[[V10]][1] : !llvm.struct<(i32, double, i32)>
-// CHECK:  %[[V13:.*]] = llvm.extractvalue %[[V10]][2] : !llvm.struct<(i32, double, i32)>
-// CHECK:  %[[V14:.*]] = llvm.mlir.undef : !llvm.struct<(i32, double, i32)>
-// CHECK:  %[[V15:.*]] = llvm.insertvalue %[[V5]], %[[V14]][0] : !llvm.struct<(i32, double, i32)>
-// CHECK:  %[[V16:.*]] = llvm.insertvalue %[[V7]], %[[V15]][1] : !llvm.struct<(i32, double, i32)>
-// CHECK:  %[[V17:.*]] = llvm.insertvalue %[[V11]], %[[V16]][2] : !llvm.struct<(i32, double, i32)>
-// CHECK:  llvm.return %[[V17]] : !llvm.struct<(i32, double, i32)>
+// CHECK:  %[[V10:.*]] = llvm.call @foo(%[[V9]]) : (i32) -> !llvm.struct<(i32, f64, i32)>
+// CHECK:  %[[V11:.*]] = llvm.extractvalue %[[V10]][0] : !llvm.struct<(i32, f64, i32)>
+// CHECK:  %[[V12:.*]] = llvm.extractvalue %[[V10]][1] : !llvm.struct<(i32, f64, i32)>
+// CHECK:  %[[V13:.*]] = llvm.extractvalue %[[V10]][2] : !llvm.struct<(i32, f64, i32)>
+// CHECK:  %[[V14:.*]] = llvm.mlir.undef : !llvm.struct<(i32, f64, i32)>
+// CHECK:  %[[V15:.*]] = llvm.insertvalue %[[V5]], %[[V14]][0] : !llvm.struct<(i32, f64, i32)>
+// CHECK:  %[[V16:.*]] = llvm.insertvalue %[[V7]], %[[V15]][1] : !llvm.struct<(i32, f64, i32)>
+// CHECK:  %[[V17:.*]] = llvm.insertvalue %[[V11]], %[[V16]][2] : !llvm.struct<(i32, f64, i32)>
+// CHECK:  llvm.return %[[V17]] : !llvm.struct<(i32, f64, i32)>
 ^bb1(%9: i32):
-  %10 = llvm.call @foo(%9) : (i32) -> !llvm.struct<(i32, double, i32)>
-  %11 = llvm.extractvalue %10[0] : !llvm.struct<(i32, double, i32)>
-  %12 = llvm.extractvalue %10[1] : !llvm.struct<(i32, double, i32)>
-  %13 = llvm.extractvalue %10[2] : !llvm.struct<(i32, double, i32)>
-  %14 = llvm.mlir.undef : !llvm.struct<(i32, double, i32)>
-  %15 = llvm.insertvalue %5, %14[0] : !llvm.struct<(i32, double, i32)>
-  %16 = llvm.insertvalue %7, %15[1] : !llvm.struct<(i32, double, i32)>
-  %17 = llvm.insertvalue %11, %16[2] : !llvm.struct<(i32, double, i32)>
-  llvm.return %17 : !llvm.struct<(i32, double, i32)>
+  %10 = llvm.call @foo(%9) : (i32) -> !llvm.struct<(i32, f64, i32)>
+  %11 = llvm.extractvalue %10[0] : !llvm.struct<(i32, f64, i32)>
+  %12 = llvm.extractvalue %10[1] : !llvm.struct<(i32, f64, i32)>
+  %13 = llvm.extractvalue %10[2] : !llvm.struct<(i32, f64, i32)>
+  %14 = llvm.mlir.undef : !llvm.struct<(i32, f64, i32)>
+  %15 = llvm.insertvalue %5, %14[0] : !llvm.struct<(i32, f64, i32)>
+  %16 = llvm.insertvalue %7, %15[1] : !llvm.struct<(i32, f64, i32)>
+  %17 = llvm.insertvalue %11, %16[2] : !llvm.struct<(i32, f64, i32)>
+  llvm.return %17 : !llvm.struct<(i32, f64, i32)>
 
 // CHECK:^[[BB2]](%[[V18:.*]]: i32):
-// CHECK:  %[[V19:.*]] = llvm.mlir.undef : !llvm.struct<(i32, double, i32)>
-// CHECK:  %[[V20:.*]] = llvm.insertvalue %[[V18]], %[[V19]][0] : !llvm.struct<(i32, double, i32)>
-// CHECK:  %[[V21:.*]] = llvm.insertvalue %[[V7]], %[[V20]][1] : !llvm.struct<(i32, double, i32)>
-// CHECK:  %[[V22:.*]] = llvm.insertvalue %[[V5]], %[[V21]][2] : !llvm.struct<(i32, double, i32)>
-// CHECK:  llvm.return %[[V22]] : !llvm.struct<(i32, double, i32)>
+// CHECK:  %[[V19:.*]] = llvm.mlir.undef : !llvm.struct<(i32, f64, i32)>
+// CHECK:  %[[V20:.*]] = llvm.insertvalue %[[V18]], %[[V19]][0] : !llvm.struct<(i32, f64, i32)>
+// CHECK:  %[[V21:.*]] = llvm.insertvalue %[[V7]], %[[V20]][1] : !llvm.struct<(i32, f64, i32)>
+// CHECK:  %[[V22:.*]] = llvm.insertvalue %[[V5]], %[[V21]][2] : !llvm.struct<(i32, f64, i32)>
+// CHECK:  llvm.return %[[V22]] : !llvm.struct<(i32, f64, i32)>
 ^bb2(%18: i32):
-  %19 = llvm.mlir.undef : !llvm.struct<(i32, double, i32)>
-  %20 = llvm.insertvalue %18, %19[0] : !llvm.struct<(i32, double, i32)>
-  %21 = llvm.insertvalue %7, %20[1] : !llvm.struct<(i32, double, i32)>
-  %22 = llvm.insertvalue %5, %21[2] : !llvm.struct<(i32, double, i32)>
-  llvm.return %22 : !llvm.struct<(i32, double, i32)>
+  %19 = llvm.mlir.undef : !llvm.struct<(i32, f64, i32)>
+  %20 = llvm.insertvalue %18, %19[0] : !llvm.struct<(i32, f64, i32)>
+  %21 = llvm.insertvalue %7, %20[1] : !llvm.struct<(i32, f64, i32)>
+  %22 = llvm.insertvalue %5, %21[2] : !llvm.struct<(i32, f64, i32)>
+  llvm.return %22 : !llvm.struct<(i32, f64, i32)>
 }
 
 // CHECK-LABEL: @casts
@@ -238,29 +238,29 @@ func @casts(%arg0: i32, %arg1: i64, %arg2: !llvm.vec<4 x i32>,
   %4 = llvm.zext %arg2 : !llvm.vec<4 x i32> to !llvm.vec<4 x i64>
 // CHECK:  = llvm.trunc %[[V4I64]] : !llvm.vec<4 x i64> to !llvm.vec<4 x i56>
   %5 = llvm.trunc %arg3 : !llvm.vec<4 x i64> to !llvm.vec<4 x i56>
-// CHECK:  = llvm.sitofp %[[I32]] : i32 to !llvm.float
-  %6 = llvm.sitofp %arg0 : i32 to !llvm.float
-// CHECK: %[[FLOAT:.*]] = llvm.uitofp %[[I32]] : i32 to !llvm.float
-  %7 = llvm.uitofp %arg0 : i32 to !llvm.float
-// CHECK:  = llvm.fptosi %[[FLOAT]] : !llvm.float to i32
-  %8 = llvm.fptosi %7 : !llvm.float to i32
-// CHECK:  = llvm.fptoui %[[FLOAT]] : !llvm.float to i32
-  %9 = llvm.fptoui %7 : !llvm.float to i32
+// CHECK:  = llvm.sitofp %[[I32]] : i32 to f32
+  %6 = llvm.sitofp %arg0 : i32 to f32
+// CHECK: %[[FLOAT:.*]] = llvm.uitofp %[[I32]] : i32 to f32
+  %7 = llvm.uitofp %arg0 : i32 to f32
+// CHECK:  = llvm.fptosi %[[FLOAT]] : f32 to i32
+  %8 = llvm.fptosi %7 : f32 to i32
+// CHECK:  = llvm.fptoui %[[FLOAT]] : f32 to i32
+  %9 = llvm.fptoui %7 : f32 to i32
 // CHECK:  = llvm.addrspacecast %[[I32PTR]] : !llvm.ptr<i32> to !llvm.ptr<i32, 2>
   %10 = llvm.addrspacecast %arg4 : !llvm.ptr<i32> to !llvm.ptr<i32, 2>
   llvm.return
 }
 
 // CHECK-LABEL: @vect
-func @vect(%arg0: !llvm.vec<4 x float>, %arg1: i32, %arg2: !llvm.float) {
-// CHECK:  = llvm.extractelement {{.*}} : !llvm.vec<4 x float>
-  %0 = llvm.extractelement %arg0[%arg1 : i32] : !llvm.vec<4 x float>
-// CHECK:  = llvm.insertelement {{.*}} : !llvm.vec<4 x float>
-  %1 = llvm.insertelement %arg2, %arg0[%arg1 : i32] : !llvm.vec<4 x float>
-// CHECK:  = llvm.shufflevector {{.*}} [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : !llvm.vec<4 x float>, !llvm.vec<4 x float>
-  %2 = llvm.shufflevector %arg0, %arg0 [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : !llvm.vec<4 x float>, !llvm.vec<4 x float>
-// CHECK:  = llvm.mlir.constant(dense<1.000000e+00> : vector<4xf32>) : !llvm.vec<4 x float>
-  %3 = llvm.mlir.constant(dense<1.0> : vector<4xf32>) : !llvm.vec<4 x float>
+func @vect(%arg0: !llvm.vec<4 x f32>, %arg1: i32, %arg2: f32) {
+// CHECK:  = llvm.extractelement {{.*}} : !llvm.vec<4 x f32>
+  %0 = llvm.extractelement %arg0[%arg1 : i32] : !llvm.vec<4 x f32>
+// CHECK:  = llvm.insertelement {{.*}} : !llvm.vec<4 x f32>
+  %1 = llvm.insertelement %arg2, %arg0[%arg1 : i32] : !llvm.vec<4 x f32>
+// CHECK:  = llvm.shufflevector {{.*}} [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : !llvm.vec<4 x f32>, !llvm.vec<4 x f32>
+  %2 = llvm.shufflevector %arg0, %arg0 [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : !llvm.vec<4 x f32>, !llvm.vec<4 x f32>
+// CHECK:  = llvm.mlir.constant(dense<1.000000e+00> : vector<4xf32>) : !llvm.vec<4 x f32>
+  %3 = llvm.mlir.constant(dense<1.0> : vector<4xf32>) : !llvm.vec<4 x f32>
   return
 }
 
@@ -283,9 +283,9 @@ func @null() {
 }
 
 // CHECK-LABEL: @atomicrmw
-func @atomicrmw(%ptr : !llvm.ptr<float>, %val : !llvm.float) {
-  // CHECK: llvm.atomicrmw fadd %{{.*}}, %{{.*}} unordered : !llvm.float
-  %0 = llvm.atomicrmw fadd %ptr, %val unordered : !llvm.float
+func @atomicrmw(%ptr : !llvm.ptr<f32>, %val : f32) {
+  // CHECK: llvm.atomicrmw fadd %{{.*}}, %{{.*}} unordered : f32
+  %0 = llvm.atomicrmw fadd %ptr, %val unordered : f32
   llvm.return
 }
 
@@ -311,7 +311,7 @@ llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personali
 // CHECK: %[[a6:.*]] = llvm.bitcast %[[a5]] : !llvm.ptr<ptr<i8>> to !llvm.ptr<i8>
 // CHECK: %[[a7:.*]] = llvm.mlir.constant(1 : i32) : i32
 // CHECK: %[[a8:.*]] = llvm.alloca %[[a7]] x i8 : (i32) -> !llvm.ptr<i8>
-// CHECK: %{{.*}} = llvm.invoke @foo(%[[a7]]) to ^[[BB2:.*]] unwind ^[[BB1:.*]] : (i32) -> !llvm.struct<(i32, double, i32)>
+// CHECK: %{{.*}} = llvm.invoke @foo(%[[a7]]) to ^[[BB2:.*]] unwind ^[[BB1:.*]] : (i32) -> !llvm.struct<(i32, f64, i32)>
   %0 = llvm.mlir.constant(0 : i32) : i32
   %1 = llvm.mlir.constant(3 : i32) : i32
   %2 = llvm.mlir.constant("\01") : !llvm.array<1 x i8>
@@ -321,7 +321,7 @@ llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personali
   %6 = llvm.bitcast %5 : !llvm.ptr<ptr<i8>> to !llvm.ptr<i8>
   %7 = llvm.mlir.constant(1 : i32) : i32
   %8 = llvm.alloca %7 x i8 : (i32) -> !llvm.ptr<i8>
-  %9 = llvm.invoke @foo(%7) to ^bb2 unwind ^bb1 : (i32) -> !llvm.struct<(i32, double, i32)>
+  %9 = llvm.invoke @foo(%7) to ^bb2 unwind ^bb1 : (i32) -> !llvm.struct<(i32, f64, i32)>
 
 // CHECK: ^[[BB1]]:
 // CHECK:   %[[lp:.*]] = llvm.landingpad cleanup (catch %[[a3]] : !llvm.ptr<ptr<i8>>) (catch %[[a6]] : !llvm.ptr<i8>) (filter %[[a2]] : !llvm.array<1 x i8>) : !llvm.struct<(ptr<i8>, i32)>
@@ -389,33 +389,33 @@ llvm.func @useInlineAsm(%arg0: i32) {
 }
 
 // CHECK-LABEL: @fastmathFlags
-func @fastmathFlags(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: i32) {
-// CHECK: {{.*}} = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
-// CHECK: {{.*}} = llvm.fsub %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
-// CHECK: {{.*}} = llvm.fmul %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
-// CHECK: {{.*}} = llvm.fdiv %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
-// CHECK: {{.*}} = llvm.frem %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
-  %0 = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
-  %1 = llvm.fsub %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
-  %2 = llvm.fmul %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
-  %3 = llvm.fdiv %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
-  %4 = llvm.frem %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
-
-// CHECK: {{.*}} = llvm.fcmp "oeq" %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
-  %5 = llvm.fcmp "oeq" %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
-
-// CHECK: {{.*}} = llvm.fneg %arg0 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
-  %6 = llvm.fneg %arg0 {fastmathFlags = #llvm.fastmath<fast>} : !llvm.float
-
-// CHECK: {{.*}} = llvm.call @foo(%arg2) {fastmathFlags = #llvm.fastmath<fast>} : (i32) -> !llvm.struct<(i32, double, i32)>
-  %7 = llvm.call @foo(%arg2) {fastmathFlags = #llvm.fastmath<fast>} : (i32) -> !llvm.struct<(i32, double, i32)>
-
-// CHECK: {{.*}} = llvm.fadd %arg0, %arg1 : !llvm.float
-  %8 = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<>} : !llvm.float
-// CHECK: {{.*}} = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : !llvm.float
-  %9 = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : !llvm.float
-
-// CHECK: {{.*}} = llvm.fneg %arg0 : !llvm.float
-  %10 = llvm.fneg %arg0 {fastmathFlags = #llvm.fastmath<>} : !llvm.float
+func @fastmathFlags(%arg0: f32, %arg1: f32, %arg2: i32) {
+// CHECK: {{.*}} = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
+// CHECK: {{.*}} = llvm.fsub %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
+// CHECK: {{.*}} = llvm.fmul %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
+// CHECK: {{.*}} = llvm.fdiv %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
+// CHECK: {{.*}} = llvm.frem %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
+  %0 = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
+  %1 = llvm.fsub %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
+  %2 = llvm.fmul %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
+  %3 = llvm.fdiv %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
+  %4 = llvm.frem %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
+
+// CHECK: {{.*}} = llvm.fcmp "oeq" %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
+  %5 = llvm.fcmp "oeq" %arg0, %arg1 {fastmathFlags = #llvm.fastmath<fast>} : f32
+
+// CHECK: {{.*}} = llvm.fneg %arg0 {fastmathFlags = #llvm.fastmath<fast>} : f32
+  %6 = llvm.fneg %arg0 {fastmathFlags = #llvm.fastmath<fast>} : f32
+
+// CHECK: {{.*}} = llvm.call @foo(%arg2) {fastmathFlags = #llvm.fastmath<fast>} : (i32) -> !llvm.struct<(i32, f64, i32)>
+  %7 = llvm.call @foo(%arg2) {fastmathFlags = #llvm.fastmath<fast>} : (i32) -> !llvm.struct<(i32, f64, i32)>
+
+// CHECK: {{.*}} = llvm.fadd %arg0, %arg1 : f32
+  %8 = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<>} : f32
+// CHECK: {{.*}} = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : f32
+  %9 = llvm.fadd %arg0, %arg1 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : f32
+
+// CHECK: {{.*}} = llvm.fneg %arg0 : f32
+  %10 = llvm.fneg %arg0 {fastmathFlags = #llvm.fastmath<>} : f32
   return
 }

diff  --git a/mlir/test/Dialect/LLVMIR/types-invalid.mlir b/mlir/test/Dialect/LLVMIR/types-invalid.mlir
index 3982f912ead2..a2a6d6163dad 100644
--- a/mlir/test/Dialect/LLVMIR/types-invalid.mlir
+++ b/mlir/test/Dialect/LLVMIR/types-invalid.mlir
@@ -77,7 +77,7 @@ func @struct_literal_opaque() {
 
 func @unexpected_type() {
   // expected-error @+1 {{unexpected type, expected keyword}}
-  "some.op"() : () -> !llvm.f32
+  "some.op"() : () -> !llvm.tensor<*xf32>
 }
 
 // -----
@@ -113,14 +113,14 @@ func @identified_struct_with_void() {
 
 func @dynamic_vector() {
   // expected-error @+1 {{expected '? x <integer> x <type>' or '<integer> x <type>'}}
-  "some.op"() : () -> !llvm.vec<? x float>
+  "some.op"() : () -> !llvm.vec<? x f32>
 }
 
 // -----
 
 func @dynamic_scalable_vector() {
   // expected-error @+1 {{expected '? x <integer> x <type>' or '<integer> x <type>'}}
-  "some.op"() : () -> !llvm.vec<? x ? x float>
+  "some.op"() : () -> !llvm.vec<? x ? x f32>
 }
 
 // -----
@@ -158,6 +158,30 @@ func private @deprecated_int() -> !llvm.i32
 
 // -----
 
-
 // expected-error @+1 {{unexpected type, expected keyword}}
 func private @unexpected_type() -> !llvm.tensor<*xf32>
+
+// -----
+
+// expected-warning @+1 {{deprecated syntax, use bf16 instead}}
+func private @deprecated_bfloat() -> !llvm.bfloat
+
+// -----
+
+// expected-warning @+1 {{deprecated syntax, use f16 instead}}
+func private @deprecated_half() -> !llvm.half
+
+// -----
+
+// expected-warning @+1 {{deprecated syntax, use f32 instead}}
+func private @deprecated_float() -> !llvm.float
+
+// -----
+
+// expected-warning @+1 {{deprecated syntax, use f64 instead}}
+func private @deprecated_double() -> !llvm.double
+
+// -----
+
+// expected-error @+1 {{unexpected type, expected keyword}}
+func private @unexpected_type() -> !llvm.f32

diff  --git a/mlir/test/Dialect/LLVMIR/types.mlir b/mlir/test/Dialect/LLVMIR/types.mlir
index f54f00a3f719..74e0e7e93633 100644
--- a/mlir/test/Dialect/LLVMIR/types.mlir
+++ b/mlir/test/Dialect/LLVMIR/types.mlir
@@ -4,14 +4,14 @@
 func @primitive() {
   // CHECK: !llvm.void
   "some.op"() : () -> !llvm.void
-  // CHECK: !llvm.half
-  "some.op"() : () -> !llvm.half
-  // CHECK: !llvm.bfloat
-  "some.op"() : () -> !llvm.bfloat
-  // CHECK: !llvm.float
-  "some.op"() : () -> !llvm.float
-  // CHECK: !llvm.double
-  "some.op"() : () -> !llvm.double
+  // CHECK: f16
+  "some.op"() : () -> f16
+  // CHECK: bf16
+  "some.op"() : () -> bf16
+  // CHECK: f32
+  "some.op"() : () -> f32
+  // CHECK: f64
+  "some.op"() : () -> f64
   // CHECK: !llvm.fp128
   "some.op"() : () -> !llvm.fp128
   // CHECK: !llvm.x86_fp80
@@ -37,8 +37,8 @@ func @func() {
   "some.op"() : () -> !llvm.func<void (i32)>
   // CHECK: !llvm.func<i32 ()>
   "some.op"() : () -> !llvm.func<i32 ()>
-  // CHECK: !llvm.func<i32 (half, bfloat, float, double)>
-  "some.op"() : () -> !llvm.func<i32 (half, bfloat, float, double)>
+  // CHECK: !llvm.func<i32 (f16, bf16, f32, f64)>
+  "some.op"() : () -> !llvm.func<i32 (f16, bf16, f32, f64)>
   // CHECK: !llvm.func<i32 (i32, i32)>
   "some.op"() : () -> !llvm.func<i32 (i32, i32)>
   // CHECK: !llvm.func<void (...)>
@@ -71,8 +71,8 @@ func @integer() {
 func @ptr() {
   // CHECK: !llvm.ptr<i8>
   "some.op"() : () -> !llvm.ptr<i8>
-  // CHECK: !llvm.ptr<float>
-  "some.op"() : () -> !llvm.ptr<float>
+  // CHECK: !llvm.ptr<f32>
+  "some.op"() : () -> !llvm.ptr<f32>
   // CHECK: !llvm.ptr<ptr<i8>>
   "some.op"() : () -> !llvm.ptr<ptr<i8>>
   // CHECK: !llvm.ptr<ptr<ptr<ptr<ptr<i8>>>>>
@@ -92,12 +92,12 @@ func @ptr() {
 func @vec() {
   // CHECK: !llvm.vec<4 x i32>
   "some.op"() : () -> !llvm.vec<4 x i32>
-  // CHECK: !llvm.vec<4 x float>
-  "some.op"() : () -> !llvm.vec<4 x float>
+  // CHECK: !llvm.vec<4 x f32>
+  "some.op"() : () -> !llvm.vec<4 x f32>
   // CHECK: !llvm.vec<? x 4 x i32>
   "some.op"() : () -> !llvm.vec<? x 4 x i32>
-  // CHECK: !llvm.vec<? x 8 x half>
-  "some.op"() : () -> !llvm.vec<? x 8 x half>
+  // CHECK: !llvm.vec<? x 8 x f16>
+  "some.op"() : () -> !llvm.vec<? x 8 x f16>
   // CHECK: !llvm.vec<4 x ptr<i8>>
   "some.op"() : () -> !llvm.vec<4 x ptr<i8>>
   return
@@ -107,12 +107,12 @@ func @vec() {
 func @array() {
   // CHECK: !llvm.array<10 x i32>
   "some.op"() : () -> !llvm.array<10 x i32>
-  // CHECK: !llvm.array<8 x float>
-  "some.op"() : () -> !llvm.array<8 x float>
+  // CHECK: !llvm.array<8 x f32>
+  "some.op"() : () -> !llvm.array<8 x f32>
   // CHECK: !llvm.array<10 x ptr<i32, 4>>
   "some.op"() : () -> !llvm.array<10 x ptr<i32, 4>>
-  // CHECK: !llvm.array<10 x array<4 x float>>
-  "some.op"() : () -> !llvm.array<10 x array<4 x float>>
+  // CHECK: !llvm.array<10 x array<4 x f32>>
+  "some.op"() : () -> !llvm.array<10 x array<4 x f32>>
   return
 }
 
@@ -122,25 +122,25 @@ func @literal_struct() {
   "some.op"() : () -> !llvm.struct<()>
   // CHECK: !llvm.struct<(i32)>
   "some.op"() : () -> !llvm.struct<(i32)>
-  // CHECK: !llvm.struct<(float, i32)>
-  "some.op"() : () -> !llvm.struct<(float, i32)>
+  // CHECK: !llvm.struct<(f32, i32)>
+  "some.op"() : () -> !llvm.struct<(f32, i32)>
   // CHECK: !llvm.struct<(struct<(i32)>)>
   "some.op"() : () -> !llvm.struct<(struct<(i32)>)>
-  // CHECK: !llvm.struct<(i32, struct<(i32)>, float)>
-  "some.op"() : () -> !llvm.struct<(i32, struct<(i32)>, float)>
+  // CHECK: !llvm.struct<(i32, struct<(i32)>, f32)>
+  "some.op"() : () -> !llvm.struct<(i32, struct<(i32)>, f32)>
 
   // CHECK: !llvm.struct<packed ()>
   "some.op"() : () -> !llvm.struct<packed ()>
   // CHECK: !llvm.struct<packed (i32)>
   "some.op"() : () -> !llvm.struct<packed (i32)>
-  // CHECK: !llvm.struct<packed (float, i32)>
-  "some.op"() : () -> !llvm.struct<packed (float, i32)>
-  // CHECK: !llvm.struct<packed (float, i32)>
-  "some.op"() : () -> !llvm.struct<packed (float, i32)>
+  // CHECK: !llvm.struct<packed (f32, i32)>
+  "some.op"() : () -> !llvm.struct<packed (f32, i32)>
+  // CHECK: !llvm.struct<packed (f32, i32)>
+  "some.op"() : () -> !llvm.struct<packed (f32, i32)>
   // CHECK: !llvm.struct<packed (struct<(i32)>)>
   "some.op"() : () -> !llvm.struct<packed (struct<(i32)>)>
-  // CHECK: !llvm.struct<packed (i32, struct<(i32, i1)>, float)>
-  "some.op"() : () -> !llvm.struct<packed (i32, struct<(i32, i1)>, float)>
+  // CHECK: !llvm.struct<packed (i32, struct<(i32, i1)>, f32)>
+  "some.op"() : () -> !llvm.struct<packed (i32, struct<(i32, i1)>, f32)>
 
   // CHECK: !llvm.struct<(struct<packed (i32)>)>
   "some.op"() : () -> !llvm.struct<(struct<packed (i32)>)>
@@ -155,8 +155,8 @@ func @identified_struct() {
   "some.op"() : () -> !llvm.struct<"empty", ()>
   // CHECK: !llvm.struct<"opaque", opaque>
   "some.op"() : () -> !llvm.struct<"opaque", opaque>
-  // CHECK: !llvm.struct<"long", (i32, struct<(i32, i1)>, float, ptr<func<void ()>>)>
-  "some.op"() : () -> !llvm.struct<"long", (i32, struct<(i32, i1)>, float, ptr<func<void ()>>)>
+  // CHECK: !llvm.struct<"long", (i32, struct<(i32, i1)>, f32, ptr<func<void ()>>)>
+  "some.op"() : () -> !llvm.struct<"long", (i32, struct<(i32, i1)>, f32, ptr<func<void ()>>)>
   // CHECK: !llvm.struct<"self-recursive", (ptr<struct<"self-recursive">>)>
   "some.op"() : () -> !llvm.struct<"self-recursive", (ptr<struct<"self-recursive">>)>
   // CHECK: !llvm.struct<"unpacked", (i32)>
@@ -183,8 +183,8 @@ func @identified_struct() {
 }
 
 func @verbose() {
-  // CHECK: !llvm.struct<(i64, struct<(float)>)>
-  "some.op"() : () -> !llvm.struct<(i64, !llvm.struct<(!llvm.float)>)>
+  // CHECK: !llvm.struct<(i64, struct<(f32)>)>
+  "some.op"() : () -> !llvm.struct<(i64, !llvm.struct<(f32)>)>
   return
 }
 
@@ -202,8 +202,8 @@ func @verbose() {
 
 // CHECK: aliases
 llvm.func @aliases() {
-  // CHECK: !llvm.struct<(i32, float, struct<(i64)>)>
-  "some.op"() : () -> !llvm.struct<(i32, float, !qux)>
+  // CHECK: !llvm.struct<(i32, f32, struct<(i64)>)>
+  "some.op"() : () -> !llvm.struct<(i32, f32, !qux)>
   // CHECK: !llvm.struct<"a", (ptr<struct<"a">>)>
   "some.op"() : () -> !rec
   llvm.return

diff  --git a/mlir/test/Dialect/Linalg/llvm.mlir b/mlir/test/Dialect/Linalg/llvm.mlir
index 829406bf21f8..8b023c388937 100644
--- a/mlir/test/Dialect/Linalg/llvm.mlir
+++ b/mlir/test/Dialect/Linalg/llvm.mlir
@@ -20,20 +20,20 @@ func @slice(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: !linalg.range)
 }
 // CHECK-LABEL: func @slice
 //   insert data ptr for slice op
-//       CHECK:   llvm.extractvalue %{{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-//  CHECK-NEXT:   llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
+//       CHECK:   llvm.extractvalue %{{.*}}[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+//  CHECK-NEXT:   llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
 //  CHECK-NEXT:   llvm.extractvalue %{{.*}}[0] : !llvm.struct<(i64, i64, i64)>
 //  CHECK-NEXT:   llvm.mul %{{.*}}, %{{.*}} : i64
 //  CHECK-NEXT:   llvm.add %{{.*}}, %{{.*}} : i64
 //    insert offset
-//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-//  CHECK-NEXT:   llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
+//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+//  CHECK-NEXT:   llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
 //  CHECK-NEXT:   llvm.mlir.constant(0 : index)
 //  CHECK-NEXT:   llvm.extractvalue %{{.*}}[0] : !llvm.struct<(i64, i64, i64)>
 //  CHECK-NEXT:   llvm.extractvalue %{{.*}}[1] : !llvm.struct<(i64, i64, i64)>
 //  CHECK-NEXT:   llvm.extractvalue %{{.*}}[2] : !llvm.struct<(i64, i64, i64)>
 //    get size[0] from parent view
-//  CHECK-NEXT:   llvm.extractvalue %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
+//  CHECK-NEXT:   llvm.extractvalue %{{.*}}[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
 //  CHECK-NEXT:   llvm.icmp "slt" %{{.*}}, %{{.*}} : i64
 //  CHECK-NEXT:   llvm.select %{{.*}}, %{{.*}}, %{{.*}} : i1, i64
 //    compute size[0] bounded by parent view's size[0]
@@ -44,8 +44,8 @@ func @slice(%arg0: memref<?xf32, offset: ?, strides: [1]>, %arg1: !linalg.range)
 //    compute stride[0] using bounded size
 //  CHECK-NEXT:   llvm.mul %{{.*}}, %{{.*}} : i64
 //    insert size and stride
-//  CHECK-NEXT:   llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
-//  CHECK-NEXT:   llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<1 x i64>, array<1 x i64>)>
+//  CHECK-NEXT:   llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
+//  CHECK-NEXT:   llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<1 x i64>, array<1 x i64>)>
 
 func @slice_with_range_and_index(%arg0: memref<?x?xf64, offset: ?, strides: [?, 1]>) {
   %c0 = constant 0 : index
@@ -58,16 +58,16 @@ func @slice_with_range_and_index(%arg0: memref<?x?xf64, offset: ?, strides: [?,
 }
 // CHECK-LABEL: func @slice_with_range_and_index
 // loop-body.
-//       CHECK:   llvm.mlir.undef : !llvm.struct<(ptr<double>, ptr<double>, i64, array<1 x i64>, array<1 x i64>)>
-//       CHECK:   llvm.extractvalue %{{.*}}[4, 0] : !llvm.struct<(ptr<double>, ptr<double>, i64, array<2 x i64>, array<2 x i64>)>
-//       CHECK:   llvm.extractvalue %{{.*}}[4, 1] : !llvm.struct<(ptr<double>, ptr<double>, i64, array<2 x i64>, array<2 x i64>)>
-//       CHECK:   llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr<double>, ptr<double>, i64, array<2 x i64>, array<2 x i64>)>
-//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<double>, ptr<double>, i64, array<1 x i64>, array<1 x i64>)>
-//       CHECK:   llvm.insertvalue %{{.*}}[2] : !llvm.struct<(ptr<double>, ptr<double>, i64, array<1 x i64>, array<1 x i64>)>
+//       CHECK:   llvm.mlir.undef : !llvm.struct<(ptr<f64>, ptr<f64>, i64, array<1 x i64>, array<1 x i64>)>
+//       CHECK:   llvm.extractvalue %{{.*}}[4, 0] : !llvm.struct<(ptr<f64>, ptr<f64>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:   llvm.extractvalue %{{.*}}[4, 1] : !llvm.struct<(ptr<f64>, ptr<f64>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:   llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr<f64>, ptr<f64>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<f64>, ptr<f64>, i64, array<1 x i64>, array<1 x i64>)>
+//       CHECK:   llvm.insertvalue %{{.*}}[2] : !llvm.struct<(ptr<f64>, ptr<f64>, i64, array<1 x i64>, array<1 x i64>)>
 //       CHECK:   llvm.extractvalue %{{.*}}[0] : !llvm.struct<(i64, i64, i64)>
 //       CHECK:   llvm.extractvalue %{{.*}}[1] : !llvm.struct<(i64, i64, i64)>
-//       CHECK:   llvm.insertvalue %{{.*}}[3, 0] : !llvm.struct<(ptr<double>, ptr<double>, i64, array<1 x i64>, array<1 x i64>)>
-//       CHECK:   llvm.insertvalue %{{.*}}[4, 0] : !llvm.struct<(ptr<double>, ptr<double>, i64, array<1 x i64>, array<1 x i64>)>
+//       CHECK:   llvm.insertvalue %{{.*}}[3, 0] : !llvm.struct<(ptr<f64>, ptr<f64>, i64, array<1 x i64>, array<1 x i64>)>
+//       CHECK:   llvm.insertvalue %{{.*}}[4, 0] : !llvm.struct<(ptr<f64>, ptr<f64>, i64, array<1 x i64>, array<1 x i64>)>
 
 func @reshape_static_expand(%arg0: memref<3x4x5xf32>) -> memref<1x3x4x1x5xf32> {
   // Reshapes that expand a contiguous tensor with some 1's.
@@ -78,33 +78,33 @@ func @reshape_static_expand(%arg0: memref<3x4x5xf32>) -> memref<1x3x4x1x5xf32> {
   return %0 : memref<1x3x4x1x5xf32>
 }
 // CHECK-LABEL: func @reshape_static_expand
-//       CHECK:    llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
-//       CHECK:    llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
-//       CHECK:    llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
-//       CHECK:    llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
+//       CHECK:    llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
+//       CHECK:    llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
+//       CHECK:    llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
+//       CHECK:    llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
 //       CHECK:    llvm.mlir.constant(1 : index) : i64
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
 //       CHECK:    llvm.mlir.constant(3 : index) : i64
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
 //       CHECK:    llvm.mlir.constant(4 : index) : i64
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[3, 2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[3, 2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
 //       CHECK:    llvm.mlir.constant(1 : index) : i64
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[3, 3] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[3, 3] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
 //       CHECK:    llvm.mlir.constant(5 : index) : i64
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[3, 4] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[3, 4] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
 //       CHECK:    llvm.mlir.constant(60 : index) : i64
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
 //       CHECK:    llvm.mlir.constant(20 : index) : i64
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
 //       CHECK:    llvm.mlir.constant(5 : index) : i64
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[4, 2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[4, 2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
 //       CHECK:    llvm.mlir.constant(5 : index) : i64
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[4, 3] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[4, 3] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
 //       CHECK:    llvm.mlir.constant(1 : index) : i64
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[4, 4] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[4, 4] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
 
 func @reshape_static_collapse(%arg0: memref<1x3x4x1x5xf32>) -> memref<3x4x5xf32> {
   %0 = linalg.reshape %arg0 [affine_map<(i, j, k, l, m) -> (i, j)>,
@@ -114,56 +114,56 @@ func @reshape_static_collapse(%arg0: memref<1x3x4x1x5xf32>) -> memref<3x4x5xf32>
   return %0 : memref<3x4x5xf32>
 }
 // CHECK-LABEL: func @reshape_static_collapse
-//       CHECK:    llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:    llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:    llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
-//       CHECK:    llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<5 x i64>, array<5 x i64>)>
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:    llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:    llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:    llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:    llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<5 x i64>, array<5 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
 //       CHECK:    llvm.mlir.constant(3 : index) : i64
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
 //       CHECK:    llvm.mlir.constant(4 : index) : i64
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
 //       CHECK:    llvm.mlir.constant(5 : index) : i64
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[3, 2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[3, 2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
 //       CHECK:    llvm.mlir.constant(20 : index) : i64
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
 //       CHECK:    llvm.mlir.constant(5 : index) : i64
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
 //       CHECK:    llvm.mlir.constant(1 : index) : i64
-//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[4, 2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<3 x i64>, array<3 x i64>)>
+//       CHECK:    llvm.insertvalue %{{.*}}, %{{.*}}[4, 2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<3 x i64>, array<3 x i64>)>
 
 func @reshape_fold_zero_dim(%arg0 : memref<1x1xf32>) -> memref<f32> {
   %0 = linalg.reshape %arg0 [] : memref<1x1xf32> into memref<f32>
   return %0 : memref<f32>
 }
 // CHECK-LABEL: func @reshape_fold_zero_dim
-//       CHECK:   llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-//       CHECK:   llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-//       CHECK:   llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-//       CHECK:   llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
+//       CHECK:   llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+//       CHECK:   llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+//       CHECK:   llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+//       CHECK:   llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
 
 func @reshape_expand_zero_dim(%arg0 : memref<f32>) -> memref<1x1xf32> {
   %0 = linalg.reshape %arg0 [] : memref<f32> into memref<1x1xf32>
   return %0 : memref<1x1xf32>
 }
 // CHECK-LABEL: func @reshape_expand_zero_dim
-//       CHECK:   llvm.mlir.undef : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//       CHECK:   llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//       CHECK:   llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
-//       CHECK:   llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64)>
-//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:   llvm.mlir.undef : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:   llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:   llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:   llvm.extractvalue %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64)>
+//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[2] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 //       CHECK:   llvm.mlir.constant(1 : index) : i64
-//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[3, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 //       CHECK:   llvm.mlir.constant(1 : index) : i64
-//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[3, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 //       CHECK:   llvm.mlir.constant(1 : index) : i64
-//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[4, 0] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
 //       CHECK:   llvm.mlir.constant(1 : index) : i64
-//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<float>, ptr<float>, i64, array<2 x i64>, array<2 x i64>)>
+//       CHECK:   llvm.insertvalue %{{.*}}, %{{.*}}[4, 1] : !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>

diff  --git a/mlir/test/Target/avx512.mlir b/mlir/test/Target/avx512.mlir
index 32d250282d56..c32593836015 100644
--- a/mlir/test/Target/avx512.mlir
+++ b/mlir/test/Target/avx512.mlir
@@ -1,31 +1,31 @@
 // RUN: mlir-opt -verify-diagnostics %s | mlir-opt | mlir-translate --avx512-mlir-to-llvmir | FileCheck %s
 
 // CHECK-LABEL: define <16 x float> @LLVM_x86_avx512_mask_ps_512
-llvm.func @LLVM_x86_avx512_mask_ps_512(%a: !llvm.vec<16 x float>,
+llvm.func @LLVM_x86_avx512_mask_ps_512(%a: !llvm.vec<16 x f32>,
                                        %b: i32,
                                        %c: i16)
-  -> (!llvm.vec<16 x float>)
+  -> (!llvm.vec<16 x f32>)
 {
   // CHECK: call <16 x float> @llvm.x86.avx512.mask.rndscale.ps.512(<16 x float>
   %0 = "llvm_avx512.mask.rndscale.ps.512"(%a, %b, %a, %c, %b) :
-    (!llvm.vec<16 x float>, i32, !llvm.vec<16 x float>, i16, i32) -> !llvm.vec<16 x float>
+    (!llvm.vec<16 x f32>, i32, !llvm.vec<16 x f32>, i16, i32) -> !llvm.vec<16 x f32>
   // CHECK: call <16 x float> @llvm.x86.avx512.mask.scalef.ps.512(<16 x float>
   %1 = "llvm_avx512.mask.scalef.ps.512"(%a, %a, %a, %c, %b) :
-    (!llvm.vec<16 x float>, !llvm.vec<16 x float>, !llvm.vec<16 x float>, i16, i32) -> !llvm.vec<16 x float>
-  llvm.return %1: !llvm.vec<16 x float>
+    (!llvm.vec<16 x f32>, !llvm.vec<16 x f32>, !llvm.vec<16 x f32>, i16, i32) -> !llvm.vec<16 x f32>
+  llvm.return %1: !llvm.vec<16 x f32>
 }
 
 // CHECK-LABEL: define <8 x double> @LLVM_x86_avx512_mask_pd_512
-llvm.func @LLVM_x86_avx512_mask_pd_512(%a: !llvm.vec<8 x double>,
+llvm.func @LLVM_x86_avx512_mask_pd_512(%a: !llvm.vec<8 x f64>,
                                        %b: i32,
                                        %c: i8)
-  -> (!llvm.vec<8 x double>)
+  -> (!llvm.vec<8 x f64>)
 {
   // CHECK: call <8 x double> @llvm.x86.avx512.mask.rndscale.pd.512(<8 x double>
   %0 = "llvm_avx512.mask.rndscale.pd.512"(%a, %b, %a, %c, %b) :
-    (!llvm.vec<8 x double>, i32, !llvm.vec<8 x double>, i8, i32) -> !llvm.vec<8 x double>
+    (!llvm.vec<8 x f64>, i32, !llvm.vec<8 x f64>, i8, i32) -> !llvm.vec<8 x f64>
   // CHECK: call <8 x double> @llvm.x86.avx512.mask.scalef.pd.512(<8 x double>
   %1 = "llvm_avx512.mask.scalef.pd.512"(%a, %a, %a, %c, %b) :
-    (!llvm.vec<8 x double>, !llvm.vec<8 x double>, !llvm.vec<8 x double>, i8, i32) -> !llvm.vec<8 x double>
-  llvm.return %1: !llvm.vec<8 x double>
+    (!llvm.vec<8 x f64>, !llvm.vec<8 x f64>, !llvm.vec<8 x f64>, i8, i32) -> !llvm.vec<8 x f64>
+  llvm.return %1: !llvm.vec<8 x f64>
 }

diff  --git a/mlir/test/Target/import.ll b/mlir/test/Target/import.ll
index 925320f9a2a1..c7b9218fca10 100644
--- a/mlir/test/Target/import.ll
+++ b/mlir/test/Target/import.ll
@@ -5,7 +5,7 @@
 
 ; CHECK: llvm.mlir.global external @g1() : !llvm.struct<"struct.s", (struct<"struct.t", ()>, i64)>
 @g1 = external global %struct.s, align 8
-; CHECK: llvm.mlir.global external @g2() : !llvm.double
+; CHECK: llvm.mlir.global external @g2() : f64
 @g2 = external global double, align 8
 ; CHECK: llvm.mlir.global internal @g3("string")
 @g3 = internal global [6 x i8] c"string"
@@ -55,7 +55,7 @@
 
 ; CHECK: llvm.mlir.global internal constant @vector_constant(dense<[1, 2]> : vector<2xi32>) : !llvm.vec<2 x i32>
 @vector_constant = internal constant <2 x i32> <i32 1, i32 2>
-; CHECK: llvm.mlir.global internal constant @array_constant(dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf32>) : !llvm.array<2 x float>
+; CHECK: llvm.mlir.global internal constant @array_constant(dense<[1.000000e+00, 2.000000e+00]> : tensor<2xf32>) : !llvm.array<2 x f32>
 @array_constant = internal constant [2 x float] [float 1., float 2.]
 ; CHECK: llvm.mlir.global internal constant @nested_array_constant(dense<[{{\[}}1, 2], [3, 4]]> : tensor<2x2xi32>) : !llvm.array<2 x array<2 x i32>>
 @nested_array_constant = internal constant [2 x [2 x i32]] [[2 x i32] [i32 1, i32 2], [2 x i32] [i32 3, i32 4]]
@@ -73,7 +73,7 @@ define internal void @func_internal() {
   ret void
 }
 
-; CHECK: llvm.func @fe(i32) -> !llvm.float
+; CHECK: llvm.func @fe(i32) -> f32
 declare float @fe(i32)
 
 ; FIXME: function attributes.
@@ -86,18 +86,18 @@ define internal dso_local i32 @f1(i64 %a) norecurse {
 entry:
 ; CHECK: %{{[0-9]+}} = llvm.inttoptr %arg0 : i64 to !llvm.ptr<i64>
   %aa = inttoptr i64 %a to i64*
-; %[[addrof:[0-9]+]] = llvm.mlir.addressof @g2 : !llvm.ptr<double>
-; %[[addrof2:[0-9]+]] = llvm.mlir.addressof @g2 : !llvm.ptr<double>
+; %[[addrof:[0-9]+]] = llvm.mlir.addressof @g2 : !llvm.ptr<f64>
+; %[[addrof2:[0-9]+]] = llvm.mlir.addressof @g2 : !llvm.ptr<f64>
 ; %{{[0-9]+}} = llvm.inttoptr %arg0 : i64 to !llvm.ptr<i64>
-; %{{[0-9]+}} = llvm.ptrtoint %[[addrof2]] : !llvm.ptr<double> to i64
-; %{{[0-9]+}} = llvm.getelementptr %[[addrof]][%3] : (!llvm.ptr<double>, i32) -> !llvm.ptr<double>
+; %{{[0-9]+}} = llvm.ptrtoint %[[addrof2]] : !llvm.ptr<f64> to i64
+; %{{[0-9]+}} = llvm.getelementptr %[[addrof]][%3] : (!llvm.ptr<f64>, i32) -> !llvm.ptr<f64>
   %bb = ptrtoint double* @g2 to i64
   %cc = getelementptr double, double* @g2, i32 2
 ; CHECK: %[[b:[0-9]+]] = llvm.trunc %arg0 : i64 to i32
   %b = trunc i64 %a to i32
-; CHECK: %[[c:[0-9]+]] = llvm.call @fe(%[[b]]) : (i32) -> !llvm.float
+; CHECK: %[[c:[0-9]+]] = llvm.call @fe(%[[b]]) : (i32) -> f32
   %c = call float @fe(i32 %b)
-; CHECK: %[[d:[0-9]+]] = llvm.fptosi %[[c]] : !llvm.float to i32
+; CHECK: %[[d:[0-9]+]] = llvm.fptosi %[[c]] : f32 to i32
   %d = fptosi float %c to i32
 ; FIXME: icmp should return i1.
 ; CHECK: %[[e:[0-9]+]] = llvm.icmp "ne" %[[d]], %[[c2]] : i32
@@ -163,8 +163,8 @@ next:
 
 ; CHECK-LABEL: llvm.func @f3() -> !llvm.ptr<i32>
 define i32* @f3() {
-; CHECK: %[[c:[0-9]+]] = llvm.mlir.addressof @g2 : !llvm.ptr<double>
-; CHECK: %[[b:[0-9]+]] = llvm.bitcast %[[c]] : !llvm.ptr<double> to !llvm.ptr<i32>
+; CHECK: %[[c:[0-9]+]] = llvm.mlir.addressof @g2 : !llvm.ptr<f64>
+; CHECK: %[[b:[0-9]+]] = llvm.bitcast %[[c]] : !llvm.ptr<f64> to !llvm.ptr<i32>
 ; CHECK: llvm.return %[[b]] : !llvm.ptr<i32>
   ret i32* bitcast (double* @g2 to i32*)
 }
@@ -206,31 +206,31 @@ define void @f6(void (i16) *%fn) {
   ret void
 }
 
-; CHECK-LABEL: llvm.func @FPArithmetic(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.double, %arg3: !llvm.double)
+; CHECK-LABEL: llvm.func @FPArithmetic(%arg0: f32, %arg1: f32, %arg2: f64, %arg3: f64)
 define void @FPArithmetic(float %a, float %b, double %c, double %d) {
-  ; CHECK: %[[a1:[0-9]+]] = llvm.mlir.constant(3.030000e+01 : f64) : !llvm.double
-  ; CHECK: %[[a2:[0-9]+]] = llvm.mlir.constant(3.030000e+01 : f32) : !llvm.float
-  ; CHECK: %[[a3:[0-9]+]] = llvm.fadd %[[a2]], %arg0 : !llvm.float
+  ; CHECK: %[[a1:[0-9]+]] = llvm.mlir.constant(3.030000e+01 : f64) : f64
+  ; CHECK: %[[a2:[0-9]+]] = llvm.mlir.constant(3.030000e+01 : f32) : f32
+  ; CHECK: %[[a3:[0-9]+]] = llvm.fadd %[[a2]], %arg0 : f32
   %1 = fadd float 0x403E4CCCC0000000, %a
-  ; CHECK: %[[a4:[0-9]+]] = llvm.fadd %arg0, %arg1 : !llvm.float
+  ; CHECK: %[[a4:[0-9]+]] = llvm.fadd %arg0, %arg1 : f32
   %2 = fadd float %a, %b
-  ; CHECK: %[[a5:[0-9]+]] = llvm.fadd %[[a1]], %arg2 : !llvm.double
+  ; CHECK: %[[a5:[0-9]+]] = llvm.fadd %[[a1]], %arg2 : f64
   %3 = fadd double 3.030000e+01, %c
-  ; CHECK: %[[a6:[0-9]+]] = llvm.fsub %arg0, %arg1 : !llvm.float
+  ; CHECK: %[[a6:[0-9]+]] = llvm.fsub %arg0, %arg1 : f32
   %4 = fsub float %a, %b
-  ; CHECK: %[[a7:[0-9]+]] = llvm.fsub %arg2, %arg3 : !llvm.double
+  ; CHECK: %[[a7:[0-9]+]] = llvm.fsub %arg2, %arg3 : f64
   %5 = fsub double %c, %d
-  ; CHECK: %[[a8:[0-9]+]] = llvm.fmul %arg0, %arg1 : !llvm.float
+  ; CHECK: %[[a8:[0-9]+]] = llvm.fmul %arg0, %arg1 : f32
   %6 = fmul float %a, %b
-  ; CHECK: %[[a9:[0-9]+]] = llvm.fmul %arg2, %arg3 : !llvm.double
+  ; CHECK: %[[a9:[0-9]+]] = llvm.fmul %arg2, %arg3 : f64
   %7 = fmul double %c, %d
-  ; CHECK: %[[a10:[0-9]+]] = llvm.fdiv %arg0, %arg1 : !llvm.float
+  ; CHECK: %[[a10:[0-9]+]] = llvm.fdiv %arg0, %arg1 : f32
   %8 = fdiv float %a, %b
-  ; CHECK: %[[a12:[0-9]+]] = llvm.fdiv %arg2, %arg3 : !llvm.double
+  ; CHECK: %[[a12:[0-9]+]] = llvm.fdiv %arg2, %arg3 : f64
   %9 = fdiv double %c, %d
-  ; CHECK: %[[a11:[0-9]+]] = llvm.frem %arg0, %arg1 : !llvm.float
+  ; CHECK: %[[a11:[0-9]+]] = llvm.frem %arg0, %arg1 : f32
   %10 = frem float %a, %b
-  ; CHECK: %[[a13:[0-9]+]] = llvm.frem %arg2, %arg3 : !llvm.double
+  ; CHECK: %[[a13:[0-9]+]] = llvm.frem %arg2, %arg3 : f64
   %11 = frem double %c, %d
   ret void
 }

diff  --git a/mlir/test/Target/llvmir-intrinsics.mlir b/mlir/test/Target/llvmir-intrinsics.mlir
index 0760e3110e6e..eedaa3e924f0 100644
--- a/mlir/test/Target/llvmir-intrinsics.mlir
+++ b/mlir/test/Target/llvmir-intrinsics.mlir
@@ -1,128 +1,128 @@
 // RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
 
 // CHECK-LABEL: @intrinsics
-llvm.func @intrinsics(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.vec<8 x float>, %arg3: !llvm.ptr<i8>) {
+llvm.func @intrinsics(%arg0: f32, %arg1: f32, %arg2: !llvm.vec<8 x f32>, %arg3: !llvm.ptr<i8>) {
   %c3 = llvm.mlir.constant(3 : i32) : i32
   %c1 = llvm.mlir.constant(1 : i32) : i32
   %c0 = llvm.mlir.constant(0 : i32) : i32
   // CHECK: call float @llvm.fmuladd.f32
-  "llvm.intr.fmuladd"(%arg0, %arg1, %arg0) : (!llvm.float, !llvm.float, !llvm.float) -> !llvm.float
+  "llvm.intr.fmuladd"(%arg0, %arg1, %arg0) : (f32, f32, f32) -> f32
   // CHECK: call <8 x float> @llvm.fmuladd.v8f32
-  "llvm.intr.fmuladd"(%arg2, %arg2, %arg2) : (!llvm.vec<8 x float>, !llvm.vec<8 x float>, !llvm.vec<8 x float>) -> !llvm.vec<8 x float>
+  "llvm.intr.fmuladd"(%arg2, %arg2, %arg2) : (!llvm.vec<8 x f32>, !llvm.vec<8 x f32>, !llvm.vec<8 x f32>) -> !llvm.vec<8 x f32>
   // CHECK: call float @llvm.fma.f32
-  "llvm.intr.fma"(%arg0, %arg1, %arg0) : (!llvm.float, !llvm.float, !llvm.float) -> !llvm.float
+  "llvm.intr.fma"(%arg0, %arg1, %arg0) : (f32, f32, f32) -> f32
   // CHECK: call <8 x float> @llvm.fma.v8f32
-  "llvm.intr.fma"(%arg2, %arg2, %arg2) : (!llvm.vec<8 x float>, !llvm.vec<8 x float>, !llvm.vec<8 x float>) -> !llvm.vec<8 x float>
+  "llvm.intr.fma"(%arg2, %arg2, %arg2) : (!llvm.vec<8 x f32>, !llvm.vec<8 x f32>, !llvm.vec<8 x f32>) -> !llvm.vec<8 x f32>
   // CHECK: call void @llvm.prefetch.p0i8(i8* %3, i32 0, i32 3, i32 1)
   "llvm.intr.prefetch"(%arg3, %c0, %c3, %c1) : (!llvm.ptr<i8>, i32, i32, i32) -> ()
   llvm.return
 }
 
 // CHECK-LABEL: @exp_test
-llvm.func @exp_test(%arg0: !llvm.float, %arg1: !llvm.vec<8 x float>) {
+llvm.func @exp_test(%arg0: f32, %arg1: !llvm.vec<8 x f32>) {
   // CHECK: call float @llvm.exp.f32
-  "llvm.intr.exp"(%arg0) : (!llvm.float) -> !llvm.float
+  "llvm.intr.exp"(%arg0) : (f32) -> f32
   // CHECK: call <8 x float> @llvm.exp.v8f32
-  "llvm.intr.exp"(%arg1) : (!llvm.vec<8 x float>) -> !llvm.vec<8 x float>
+  "llvm.intr.exp"(%arg1) : (!llvm.vec<8 x f32>) -> !llvm.vec<8 x f32>
   llvm.return
 }
 
 // CHECK-LABEL: @exp2_test
-llvm.func @exp2_test(%arg0: !llvm.float, %arg1: !llvm.vec<8 x float>) {
+llvm.func @exp2_test(%arg0: f32, %arg1: !llvm.vec<8 x f32>) {
   // CHECK: call float @llvm.exp2.f32
-  "llvm.intr.exp2"(%arg0) : (!llvm.float) -> !llvm.float
+  "llvm.intr.exp2"(%arg0) : (f32) -> f32
   // CHECK: call <8 x float> @llvm.exp2.v8f32
-  "llvm.intr.exp2"(%arg1) : (!llvm.vec<8 x float>) -> !llvm.vec<8 x float>
+  "llvm.intr.exp2"(%arg1) : (!llvm.vec<8 x f32>) -> !llvm.vec<8 x f32>
   llvm.return
 }
 
 // CHECK-LABEL: @log_test
-llvm.func @log_test(%arg0: !llvm.float, %arg1: !llvm.vec<8 x float>) {
+llvm.func @log_test(%arg0: f32, %arg1: !llvm.vec<8 x f32>) {
   // CHECK: call float @llvm.log.f32
-  "llvm.intr.log"(%arg0) : (!llvm.float) -> !llvm.float
+  "llvm.intr.log"(%arg0) : (f32) -> f32
   // CHECK: call <8 x float> @llvm.log.v8f32
-  "llvm.intr.log"(%arg1) : (!llvm.vec<8 x float>) -> !llvm.vec<8 x float>
+  "llvm.intr.log"(%arg1) : (!llvm.vec<8 x f32>) -> !llvm.vec<8 x f32>
   llvm.return
 }
 
 // CHECK-LABEL: @log10_test
-llvm.func @log10_test(%arg0: !llvm.float, %arg1: !llvm.vec<8 x float>) {
+llvm.func @log10_test(%arg0: f32, %arg1: !llvm.vec<8 x f32>) {
   // CHECK: call float @llvm.log10.f32
-  "llvm.intr.log10"(%arg0) : (!llvm.float) -> !llvm.float
+  "llvm.intr.log10"(%arg0) : (f32) -> f32
   // CHECK: call <8 x float> @llvm.log10.v8f32
-  "llvm.intr.log10"(%arg1) : (!llvm.vec<8 x float>) -> !llvm.vec<8 x float>
+  "llvm.intr.log10"(%arg1) : (!llvm.vec<8 x f32>) -> !llvm.vec<8 x f32>
   llvm.return
 }
 
 // CHECK-LABEL: @log2_test
-llvm.func @log2_test(%arg0: !llvm.float, %arg1: !llvm.vec<8 x float>) {
+llvm.func @log2_test(%arg0: f32, %arg1: !llvm.vec<8 x f32>) {
   // CHECK: call float @llvm.log2.f32
-  "llvm.intr.log2"(%arg0) : (!llvm.float) -> !llvm.float
+  "llvm.intr.log2"(%arg0) : (f32) -> f32
   // CHECK: call <8 x float> @llvm.log2.v8f32
-  "llvm.intr.log2"(%arg1) : (!llvm.vec<8 x float>) -> !llvm.vec<8 x float>
+  "llvm.intr.log2"(%arg1) : (!llvm.vec<8 x f32>) -> !llvm.vec<8 x f32>
   llvm.return
 }
 
 // CHECK-LABEL: @fabs_test
-llvm.func @fabs_test(%arg0: !llvm.float, %arg1: !llvm.vec<8 x float>) {
+llvm.func @fabs_test(%arg0: f32, %arg1: !llvm.vec<8 x f32>) {
   // CHECK: call float @llvm.fabs.f32
-  "llvm.intr.fabs"(%arg0) : (!llvm.float) -> !llvm.float
+  "llvm.intr.fabs"(%arg0) : (f32) -> f32
   // CHECK: call <8 x float> @llvm.fabs.v8f32
-  "llvm.intr.fabs"(%arg1) : (!llvm.vec<8 x float>) -> !llvm.vec<8 x float>
+  "llvm.intr.fabs"(%arg1) : (!llvm.vec<8 x f32>) -> !llvm.vec<8 x f32>
   llvm.return
 }
 
 // CHECK-LABEL: @sqrt_test
-llvm.func @sqrt_test(%arg0: !llvm.float, %arg1: !llvm.vec<8 x float>) {
+llvm.func @sqrt_test(%arg0: f32, %arg1: !llvm.vec<8 x f32>) {
   // CHECK: call float @llvm.sqrt.f32
-  "llvm.intr.sqrt"(%arg0) : (!llvm.float) -> !llvm.float
+  "llvm.intr.sqrt"(%arg0) : (f32) -> f32
   // CHECK: call <8 x float> @llvm.sqrt.v8f32
-  "llvm.intr.sqrt"(%arg1) : (!llvm.vec<8 x float>) -> !llvm.vec<8 x float>
+  "llvm.intr.sqrt"(%arg1) : (!llvm.vec<8 x f32>) -> !llvm.vec<8 x f32>
   llvm.return
 }
 
 // CHECK-LABEL: @ceil_test
-llvm.func @ceil_test(%arg0: !llvm.float, %arg1: !llvm.vec<8 x float>) {
+llvm.func @ceil_test(%arg0: f32, %arg1: !llvm.vec<8 x f32>) {
   // CHECK: call float @llvm.ceil.f32
-  "llvm.intr.ceil"(%arg0) : (!llvm.float) -> !llvm.float
+  "llvm.intr.ceil"(%arg0) : (f32) -> f32
   // CHECK: call <8 x float> @llvm.ceil.v8f32
-  "llvm.intr.ceil"(%arg1) : (!llvm.vec<8 x float>) -> !llvm.vec<8 x float>
+  "llvm.intr.ceil"(%arg1) : (!llvm.vec<8 x f32>) -> !llvm.vec<8 x f32>
   llvm.return
 }
 
 // CHECK-LABEL: @floor_test
-llvm.func @floor_test(%arg0: !llvm.float, %arg1: !llvm.vec<8 x float>) {
+llvm.func @floor_test(%arg0: f32, %arg1: !llvm.vec<8 x f32>) {
   // CHECK: call float @llvm.floor.f32
-  "llvm.intr.floor"(%arg0) : (!llvm.float) -> !llvm.float
+  "llvm.intr.floor"(%arg0) : (f32) -> f32
   // CHECK: call <8 x float> @llvm.floor.v8f32
-  "llvm.intr.floor"(%arg1) : (!llvm.vec<8 x float>) -> !llvm.vec<8 x float>
+  "llvm.intr.floor"(%arg1) : (!llvm.vec<8 x f32>) -> !llvm.vec<8 x f32>
   llvm.return
 }
 
 // CHECK-LABEL: @cos_test
-llvm.func @cos_test(%arg0: !llvm.float, %arg1: !llvm.vec<8 x float>) {
+llvm.func @cos_test(%arg0: f32, %arg1: !llvm.vec<8 x f32>) {
   // CHECK: call float @llvm.cos.f32
-  "llvm.intr.cos"(%arg0) : (!llvm.float) -> !llvm.float
+  "llvm.intr.cos"(%arg0) : (f32) -> f32
   // CHECK: call <8 x float> @llvm.cos.v8f32
-  "llvm.intr.cos"(%arg1) : (!llvm.vec<8 x float>) -> !llvm.vec<8 x float>
+  "llvm.intr.cos"(%arg1) : (!llvm.vec<8 x f32>) -> !llvm.vec<8 x f32>
   llvm.return
 }
 
 // CHECK-LABEL: @copysign_test
-llvm.func @copysign_test(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.vec<8 x float>, %arg3: !llvm.vec<8 x float>) {
+llvm.func @copysign_test(%arg0: f32, %arg1: f32, %arg2: !llvm.vec<8 x f32>, %arg3: !llvm.vec<8 x f32>) {
   // CHECK: call float @llvm.copysign.f32
-  "llvm.intr.copysign"(%arg0, %arg1) : (!llvm.float, !llvm.float) -> !llvm.float
+  "llvm.intr.copysign"(%arg0, %arg1) : (f32, f32) -> f32
   // CHECK: call <8 x float> @llvm.copysign.v8f32
-  "llvm.intr.copysign"(%arg2, %arg3) : (!llvm.vec<8 x float>, !llvm.vec<8 x float>) -> !llvm.vec<8 x float>
+  "llvm.intr.copysign"(%arg2, %arg3) : (!llvm.vec<8 x f32>, !llvm.vec<8 x f32>) -> !llvm.vec<8 x f32>
   llvm.return
 }
 
 // CHECK-LABEL: @pow_test
-llvm.func @pow_test(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.vec<8 x float>, %arg3: !llvm.vec<8 x float>) {
+llvm.func @pow_test(%arg0: f32, %arg1: f32, %arg2: !llvm.vec<8 x f32>, %arg3: !llvm.vec<8 x f32>) {
   // CHECK: call float @llvm.pow.f32
-  "llvm.intr.pow"(%arg0, %arg1) : (!llvm.float, !llvm.float) -> !llvm.float
+  "llvm.intr.pow"(%arg0, %arg1) : (f32, f32) -> f32
   // CHECK: call <8 x float> @llvm.pow.v8f32
-  "llvm.intr.pow"(%arg2, %arg3) : (!llvm.vec<8 x float>, !llvm.vec<8 x float>) -> !llvm.vec<8 x float>
+  "llvm.intr.pow"(%arg2, %arg3) : (!llvm.vec<8 x f32>, !llvm.vec<8 x f32>) -> !llvm.vec<8 x f32>
   llvm.return
 }
 
@@ -145,20 +145,20 @@ llvm.func @ctpop_test(%arg0: i32, %arg1: !llvm.vec<8 x i32>) {
 }
 
 // CHECK-LABEL: @maxnum_test
-llvm.func @maxnum_test(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.vec<8 x float>, %arg3: !llvm.vec<8 x float>) {
+llvm.func @maxnum_test(%arg0: f32, %arg1: f32, %arg2: !llvm.vec<8 x f32>, %arg3: !llvm.vec<8 x f32>) {
   // CHECK: call float @llvm.maxnum.f32
-  "llvm.intr.maxnum"(%arg0, %arg1) : (!llvm.float, !llvm.float) -> !llvm.float
+  "llvm.intr.maxnum"(%arg0, %arg1) : (f32, f32) -> f32
   // CHECK: call <8 x float> @llvm.maxnum.v8f32
-  "llvm.intr.maxnum"(%arg2, %arg3) : (!llvm.vec<8 x float>, !llvm.vec<8 x float>) -> !llvm.vec<8 x float>
+  "llvm.intr.maxnum"(%arg2, %arg3) : (!llvm.vec<8 x f32>, !llvm.vec<8 x f32>) -> !llvm.vec<8 x f32>
   llvm.return
 }
 
 // CHECK-LABEL: @minnum_test
-llvm.func @minnum_test(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: !llvm.vec<8 x float>, %arg3: !llvm.vec<8 x float>) {
+llvm.func @minnum_test(%arg0: f32, %arg1: f32, %arg2: !llvm.vec<8 x f32>, %arg3: !llvm.vec<8 x f32>) {
   // CHECK: call float @llvm.minnum.f32
-  "llvm.intr.minnum"(%arg0, %arg1) : (!llvm.float, !llvm.float) -> !llvm.float
+  "llvm.intr.minnum"(%arg0, %arg1) : (f32, f32) -> f32
   // CHECK: call <8 x float> @llvm.minnum.v8f32
-  "llvm.intr.minnum"(%arg2, %arg3) : (!llvm.vec<8 x float>, !llvm.vec<8 x float>) -> !llvm.vec<8 x float>
+  "llvm.intr.minnum"(%arg2, %arg3) : (!llvm.vec<8 x f32>, !llvm.vec<8 x f32>) -> !llvm.vec<8 x f32>
   llvm.return
 }
 
@@ -181,15 +181,15 @@ llvm.func @smin_test(%arg0: i32, %arg1: i32, %arg2: !llvm.vec<8 x i32>, %arg3: !
 }
 
 // CHECK-LABEL: @vector_reductions
-llvm.func @vector_reductions(%arg0: !llvm.float, %arg1: !llvm.vec<8 x float>, %arg2: !llvm.vec<8 x i32>) {
+llvm.func @vector_reductions(%arg0: f32, %arg1: !llvm.vec<8 x f32>, %arg2: !llvm.vec<8 x i32>) {
   // CHECK: call i32 @llvm.vector.reduce.add.v8i32
   "llvm.intr.vector.reduce.add"(%arg2) : (!llvm.vec<8 x i32>) -> i32
   // CHECK: call i32 @llvm.vector.reduce.and.v8i32
   "llvm.intr.vector.reduce.and"(%arg2) : (!llvm.vec<8 x i32>) -> i32
   // CHECK: call float @llvm.vector.reduce.fmax.v8f32
-  "llvm.intr.vector.reduce.fmax"(%arg1) : (!llvm.vec<8 x float>) -> !llvm.float
+  "llvm.intr.vector.reduce.fmax"(%arg1) : (!llvm.vec<8 x f32>) -> f32
   // CHECK: call float @llvm.vector.reduce.fmin.v8f32
-  "llvm.intr.vector.reduce.fmin"(%arg1) : (!llvm.vec<8 x float>) -> !llvm.float
+  "llvm.intr.vector.reduce.fmin"(%arg1) : (!llvm.vec<8 x f32>) -> f32
   // CHECK: call i32 @llvm.vector.reduce.mul.v8i32
   "llvm.intr.vector.reduce.mul"(%arg2) : (!llvm.vec<8 x i32>) -> i32
   // CHECK: call i32 @llvm.vector.reduce.or.v8i32
@@ -203,13 +203,13 @@ llvm.func @vector_reductions(%arg0: !llvm.float, %arg1: !llvm.vec<8 x float>, %a
   // CHECK: call i32 @llvm.vector.reduce.umin.v8i32
   "llvm.intr.vector.reduce.umin"(%arg2) : (!llvm.vec<8 x i32>) -> i32
   // CHECK: call float @llvm.vector.reduce.fadd.v8f32
-  "llvm.intr.vector.reduce.fadd"(%arg0, %arg1) : (!llvm.float, !llvm.vec<8 x float>) -> !llvm.float
+  "llvm.intr.vector.reduce.fadd"(%arg0, %arg1) : (f32, !llvm.vec<8 x f32>) -> f32
   // CHECK: call float @llvm.vector.reduce.fmul.v8f32
-  "llvm.intr.vector.reduce.fmul"(%arg0, %arg1) : (!llvm.float, !llvm.vec<8 x float>) -> !llvm.float
+  "llvm.intr.vector.reduce.fmul"(%arg0, %arg1) : (f32, !llvm.vec<8 x f32>) -> f32
   // CHECK: call reassoc float @llvm.vector.reduce.fadd.v8f32
-  "llvm.intr.vector.reduce.fadd"(%arg0, %arg1) {reassoc = true} : (!llvm.float, !llvm.vec<8 x float>) -> !llvm.float
+  "llvm.intr.vector.reduce.fadd"(%arg0, %arg1) {reassoc = true} : (f32, !llvm.vec<8 x f32>) -> f32
   // CHECK: call reassoc float @llvm.vector.reduce.fmul.v8f32
-  "llvm.intr.vector.reduce.fmul"(%arg0, %arg1) {reassoc = true} : (!llvm.float, !llvm.vec<8 x float>) -> !llvm.float
+  "llvm.intr.vector.reduce.fmul"(%arg0, %arg1) {reassoc = true} : (f32, !llvm.vec<8 x f32>) -> f32
   // CHECK: call i32 @llvm.vector.reduce.xor.v8i32
   "llvm.intr.vector.reduce.xor"(%arg2) : (!llvm.vec<8 x i32>) -> i32
   llvm.return
@@ -217,23 +217,23 @@ llvm.func @vector_reductions(%arg0: !llvm.float, %arg1: !llvm.vec<8 x float>, %a
 
 // CHECK-LABEL: @matrix_intrinsics
 //                                       4x16                       16x3
-llvm.func @matrix_intrinsics(%A: !llvm.vec<64 x float>, %B: !llvm.vec<48 x float>,
-                             %ptr: !llvm.ptr<float>, %stride: i64) {
+llvm.func @matrix_intrinsics(%A: !llvm.vec<64 x f32>, %B: !llvm.vec<48 x f32>,
+                             %ptr: !llvm.ptr<f32>, %stride: i64) {
   // CHECK: call <12 x float> @llvm.matrix.multiply.v12f32.v64f32.v48f32(<64 x float> %0, <48 x float> %1, i32 4, i32 16, i32 3)
   %C = llvm.intr.matrix.multiply %A, %B
     { lhs_rows = 4: i32, lhs_columns = 16: i32 , rhs_columns = 3: i32} :
-    (!llvm.vec<64 x float>, !llvm.vec<48 x float>) -> !llvm.vec<12 x float>
+    (!llvm.vec<64 x f32>, !llvm.vec<48 x f32>) -> !llvm.vec<12 x f32>
   // CHECK: call <48 x float> @llvm.matrix.transpose.v48f32(<48 x float> %1, i32 3, i32 16)
   %D = llvm.intr.matrix.transpose %B { rows = 3: i32, columns = 16: i32} :
-    !llvm.vec<48 x float> into !llvm.vec<48 x float>
+    !llvm.vec<48 x f32> into !llvm.vec<48 x f32>
   // CHECK: call <48 x float> @llvm.matrix.column.major.load.v48f32(float* align 4 %2, i64 %3, i1 false, i32 3, i32 16)
   %E = llvm.intr.matrix.column.major.load %ptr, <stride=%stride>
     { isVolatile = 0: i1, rows = 3: i32, columns = 16: i32} :
-    !llvm.vec<48 x float> from !llvm.ptr<float> stride i64
+    !llvm.vec<48 x f32> from !llvm.ptr<f32> stride i64
   // CHECK: call void @llvm.matrix.column.major.store.v48f32(<48 x float> %7, float* align 4 %2, i64 %3, i1 false, i32 3, i32 16)
   llvm.intr.matrix.column.major.store %E, %ptr, <stride=%stride>
     { isVolatile = 0: i1, rows = 3: i32, columns = 16: i32} :
-    !llvm.vec<48 x float> to !llvm.ptr<float> stride i64
+    !llvm.vec<48 x f32> to !llvm.ptr<f32> stride i64
   llvm.return
 }
 
@@ -245,41 +245,41 @@ llvm.func @get_active_lane_mask(%base: i64, %n: i64) -> (!llvm.vec<7 x i1>) {
 }
 
 // CHECK-LABEL: @masked_load_store_intrinsics
-llvm.func @masked_load_store_intrinsics(%A: !llvm.ptr<vec<7 x float>>, %mask: !llvm.vec<7 x i1>) {
+llvm.func @masked_load_store_intrinsics(%A: !llvm.ptr<vec<7 x f32>>, %mask: !llvm.vec<7 x i1>) {
   // CHECK: call <7 x float> @llvm.masked.load.v7f32.p0v7f32(<7 x float>* %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> undef)
   %a = llvm.intr.masked.load %A, %mask { alignment = 1: i32} :
-    (!llvm.ptr<vec<7 x float>>, !llvm.vec<7 x i1>) -> !llvm.vec<7 x float>
+    (!llvm.ptr<vec<7 x f32>>, !llvm.vec<7 x i1>) -> !llvm.vec<7 x f32>
   // CHECK: call <7 x float> @llvm.masked.load.v7f32.p0v7f32(<7 x float>* %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
   %b = llvm.intr.masked.load %A, %mask, %a { alignment = 1: i32} :
-    (!llvm.ptr<vec<7 x float>>, !llvm.vec<7 x i1>, !llvm.vec<7 x float>) -> !llvm.vec<7 x float>
+    (!llvm.ptr<vec<7 x f32>>, !llvm.vec<7 x i1>, !llvm.vec<7 x f32>) -> !llvm.vec<7 x f32>
   // CHECK: call void @llvm.masked.store.v7f32.p0v7f32(<7 x float> %{{.*}}, <7 x float>* %0, i32 {{.*}}, <7 x i1> %{{.*}})
   llvm.intr.masked.store %b, %A, %mask { alignment = 1: i32} :
-    !llvm.vec<7 x float>, !llvm.vec<7 x i1> into !llvm.ptr<vec<7 x float>>
+    !llvm.vec<7 x f32>, !llvm.vec<7 x i1> into !llvm.ptr<vec<7 x f32>>
   llvm.return
 }
 
 // CHECK-LABEL: @masked_gather_scatter_intrinsics
-llvm.func @masked_gather_scatter_intrinsics(%M: !llvm.vec<7 x ptr<float>>, %mask: !llvm.vec<7 x i1>) {
+llvm.func @masked_gather_scatter_intrinsics(%M: !llvm.vec<7 x ptr<f32>>, %mask: !llvm.vec<7 x i1>) {
   // CHECK: call <7 x float> @llvm.masked.gather.v7f32.v7p0f32(<7 x float*> %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> undef)
   %a = llvm.intr.masked.gather %M, %mask { alignment = 1: i32} :
-      (!llvm.vec<7 x ptr<float>>, !llvm.vec<7 x i1>) -> !llvm.vec<7 x float>
+      (!llvm.vec<7 x ptr<f32>>, !llvm.vec<7 x i1>) -> !llvm.vec<7 x f32>
   // CHECK: call <7 x float> @llvm.masked.gather.v7f32.v7p0f32(<7 x float*> %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
   %b = llvm.intr.masked.gather %M, %mask, %a { alignment = 1: i32} :
-      (!llvm.vec<7 x ptr<float>>, !llvm.vec<7 x i1>, !llvm.vec<7 x float>) -> !llvm.vec<7 x float>
+      (!llvm.vec<7 x ptr<f32>>, !llvm.vec<7 x i1>, !llvm.vec<7 x f32>) -> !llvm.vec<7 x f32>
   // CHECK: call void @llvm.masked.scatter.v7f32.v7p0f32(<7 x float> %{{.*}}, <7 x float*> %{{.*}}, i32 1, <7 x i1> %{{.*}})
   llvm.intr.masked.scatter %b, %M, %mask { alignment = 1: i32} :
-      !llvm.vec<7 x float>, !llvm.vec<7 x i1> into !llvm.vec<7 x ptr<float>>
+      !llvm.vec<7 x f32>, !llvm.vec<7 x i1> into !llvm.vec<7 x ptr<f32>>
   llvm.return
 }
 
 // CHECK-LABEL: @masked_expand_compress_intrinsics
-llvm.func @masked_expand_compress_intrinsics(%ptr: !llvm.ptr<float>, %mask: !llvm.vec<7 x i1>, %passthru: !llvm.vec<7 x float>) {
+llvm.func @masked_expand_compress_intrinsics(%ptr: !llvm.ptr<f32>, %mask: !llvm.vec<7 x i1>, %passthru: !llvm.vec<7 x f32>) {
   // CHECK: call <7 x float> @llvm.masked.expandload.v7f32(float* %{{.*}}, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
   %0 = "llvm.intr.masked.expandload"(%ptr, %mask, %passthru)
-    : (!llvm.ptr<float>, !llvm.vec<7 x i1>, !llvm.vec<7 x float>) -> (!llvm.vec<7 x float>)
+    : (!llvm.ptr<f32>, !llvm.vec<7 x i1>, !llvm.vec<7 x f32>) -> (!llvm.vec<7 x f32>)
   // CHECK: call void @llvm.masked.compressstore.v7f32(<7 x float> %{{.*}}, float* %{{.*}}, <7 x i1> %{{.*}})
   "llvm.intr.masked.compressstore"(%0, %ptr, %mask)
-    : (!llvm.vec<7 x float>, !llvm.ptr<float>, !llvm.vec<7 x i1>) -> ()
+    : (!llvm.vec<7 x f32>, !llvm.ptr<f32>, !llvm.vec<7 x i1>) -> ()
   llvm.return
 }
 

diff  --git a/mlir/test/Target/llvmir-invalid.mlir b/mlir/test/Target/llvmir-invalid.mlir
index fcd98ef4b143..1f3bcdf9ddf3 100644
--- a/mlir/test/Target/llvmir-invalid.mlir
+++ b/mlir/test/Target/llvmir-invalid.mlir
@@ -8,28 +8,28 @@ func @foo() {
 // -----
 
 // expected-error @+1 {{llvm.noalias attribute attached to LLVM non-pointer argument}}
-llvm.func @invalid_noalias(%arg0 : !llvm.float {llvm.noalias = true}) -> !llvm.float {
-  llvm.return %arg0 : !llvm.float
+llvm.func @invalid_noalias(%arg0 : f32 {llvm.noalias = true}) -> f32 {
+  llvm.return %arg0 : f32
 }
 
 // -----
 
 // expected-error @+1 {{llvm.sret attribute attached to LLVM non-pointer argument}}
-llvm.func @invalid_noalias(%arg0 : !llvm.float {llvm.sret}) -> !llvm.float {
-  llvm.return %arg0 : !llvm.float
+llvm.func @invalid_noalias(%arg0 : f32 {llvm.sret}) -> f32 {
+  llvm.return %arg0 : f32
 }
 // -----
 
 // expected-error @+1 {{llvm.byval attribute attached to LLVM non-pointer argument}}
-llvm.func @invalid_noalias(%arg0 : !llvm.float {llvm.byval}) -> !llvm.float {
-  llvm.return %arg0 : !llvm.float
+llvm.func @invalid_noalias(%arg0 : f32 {llvm.byval}) -> f32 {
+  llvm.return %arg0 : f32
 }
 
 // -----
 
 // expected-error @+1 {{llvm.align attribute attached to LLVM non-pointer argument}}
-llvm.func @invalid_align(%arg0 : !llvm.float {llvm.align = 4}) -> !llvm.float {
-  llvm.return %arg0 : !llvm.float
+llvm.func @invalid_align(%arg0 : f32 {llvm.align = 4}) -> f32 {
+  llvm.return %arg0 : f32
 }
 
 // -----
@@ -43,7 +43,7 @@ llvm.func @no_nested_struct() -> !llvm.array<2 x array<2 x array<2 x struct<(i32
 // -----
 
 // expected-error @+1 {{unsupported constant value}}
-llvm.mlir.global internal constant @test([2.5, 7.4]) : !llvm.array<2 x double>
+llvm.mlir.global internal constant @test([2.5, 7.4]) : !llvm.array<2 x f64>
 
 // -----
 

diff  --git a/mlir/test/Target/llvmir-types.mlir b/mlir/test/Target/llvmir-types.mlir
index db2d6f8a9a34..5ea83549f041 100644
--- a/mlir/test/Target/llvmir-types.mlir
+++ b/mlir/test/Target/llvmir-types.mlir
@@ -7,13 +7,13 @@
 // CHECK: declare void @return_void()
 llvm.func @return_void() -> !llvm.void
 // CHECK: declare half @return_half()
-llvm.func @return_half() -> !llvm.half
+llvm.func @return_half() -> f16
 // CHECK: declare bfloat @return_bfloat()
-llvm.func @return_bfloat() -> !llvm.bfloat
+llvm.func @return_bfloat() -> bf16
 // CHECK: declare float @return_float()
-llvm.func @return_float() -> !llvm.float
+llvm.func @return_float() -> f32
 // CHECK: declare double @return_double()
-llvm.func @return_double() -> !llvm.double
+llvm.func @return_double() -> f64
 // CHECK: declare fp128 @return_fp128()
 llvm.func @return_fp128() -> !llvm.fp128
 // CHECK: declare x86_fp80 @return_x86_fp80()
@@ -32,7 +32,7 @@ llvm.func @f_void_i32(i32) -> !llvm.void
 // CHECK: declare i32 @f_i32_empty()
 llvm.func @f_i32_empty() -> i32
 // CHECK: declare i32 @f_i32_half_bfloat_float_double(half, bfloat, float, double)
-llvm.func @f_i32_half_bfloat_float_double(!llvm.half, !llvm.bfloat, !llvm.float, !llvm.double) -> i32
+llvm.func @f_i32_half_bfloat_float_double(f16, bf16, f32, f64) -> i32
 // CHECK: declare i32 @f_i32_i32_i32(i32, i32)
 llvm.func @f_i32_i32_i32(i32, i32) -> i32
 // CHECK: declare void @f_void_variadic(...)
@@ -68,7 +68,7 @@ llvm.func @return_i129() -> i129
 // CHECK: declare i8* @return_pi8()
 llvm.func @return_pi8() -> !llvm.ptr<i8>
 // CHECK: declare float* @return_pfloat()
-llvm.func @return_pfloat() -> !llvm.ptr<float>
+llvm.func @return_pfloat() -> !llvm.ptr<f32>
 // CHECK: declare i8** @return_ppi8()
 llvm.func @return_ppi8() -> !llvm.ptr<ptr<i8>>
 // CHECK: declare i8***** @return_pppppi8()
@@ -89,11 +89,11 @@ llvm.func @return_ppi8_42_9() -> !llvm.ptr<ptr<i8, 42>, 9>
 // CHECK: declare <4 x i32> @return_v4_i32()
 llvm.func @return_v4_i32() -> !llvm.vec<4 x i32>
 // CHECK: declare <4 x float> @return_v4_float()
-llvm.func @return_v4_float() -> !llvm.vec<4 x float>
+llvm.func @return_v4_float() -> !llvm.vec<4 x f32>
 // CHECK: declare <vscale x 4 x i32> @return_vs_4_i32()
 llvm.func @return_vs_4_i32() -> !llvm.vec<? x 4 x i32>
 // CHECK: declare <vscale x 8 x half> @return_vs_8_half()
-llvm.func @return_vs_8_half() -> !llvm.vec<? x 8 x half>
+llvm.func @return_vs_8_half() -> !llvm.vec<? x 8 x f16>
 // CHECK: declare <4 x i8*> @return_v_4_pi8()
 llvm.func @return_v_4_pi8() -> !llvm.vec<4 x ptr<i8>>
 
@@ -104,11 +104,11 @@ llvm.func @return_v_4_pi8() -> !llvm.vec<4 x ptr<i8>>
 // CHECK: declare [10 x i32] @return_a10_i32()
 llvm.func @return_a10_i32() -> !llvm.array<10 x i32>
 // CHECK: declare [8 x float] @return_a8_float()
-llvm.func @return_a8_float() -> !llvm.array<8 x float>
+llvm.func @return_a8_float() -> !llvm.array<8 x f32>
 // CHECK: declare [10 x i32 addrspace(4)*] @return_a10_pi32_4()
 llvm.func @return_a10_pi32_4() -> !llvm.array<10 x ptr<i32, 4>>
 // CHECK: declare [10 x [4 x float]] @return_a10_a4_float()
-llvm.func @return_a10_a4_float() -> !llvm.array<10 x array<4 x float>>
+llvm.func @return_a10_a4_float() -> !llvm.array<10 x array<4 x f32>>
 
 //
 // Literal structures.
@@ -119,20 +119,20 @@ llvm.func @return_struct_empty() -> !llvm.struct<()>
 // CHECK: declare { i32 } @return_s_i32()
 llvm.func @return_s_i32() -> !llvm.struct<(i32)>
 // CHECK: declare { float, i32 } @return_s_float_i32()
-llvm.func @return_s_float_i32() -> !llvm.struct<(float, i32)>
+llvm.func @return_s_float_i32() -> !llvm.struct<(f32, i32)>
 // CHECK: declare { { i32 } } @return_s_s_i32()
 llvm.func @return_s_s_i32() -> !llvm.struct<(struct<(i32)>)>
 // CHECK: declare { i32, { i32 }, float } @return_s_i32_s_i32_float()
-llvm.func @return_s_i32_s_i32_float() -> !llvm.struct<(i32, struct<(i32)>, float)>
+llvm.func @return_s_i32_s_i32_float() -> !llvm.struct<(i32, struct<(i32)>, f32)>
 
 // CHECK: declare <{}> @return_sp_empty()
 llvm.func @return_sp_empty() -> !llvm.struct<packed ()>
 // CHECK: declare <{ i32 }> @return_sp_i32()
 llvm.func @return_sp_i32() -> !llvm.struct<packed (i32)>
 // CHECK: declare <{ float, i32 }> @return_sp_float_i32()
-llvm.func @return_sp_float_i32() -> !llvm.struct<packed (float, i32)>
+llvm.func @return_sp_float_i32() -> !llvm.struct<packed (f32, i32)>
 // CHECK: declare <{ i32, { i32, i1 }, float }> @return_sp_i32_s_i31_1_float()
-llvm.func @return_sp_i32_s_i31_1_float() -> !llvm.struct<packed (i32, struct<(i32, i1)>, float)>
+llvm.func @return_sp_i32_s_i31_1_float() -> !llvm.struct<packed (i32, struct<(i32, i1)>, f32)>
 
 // CHECK: declare { <{ i32 }> } @return_s_sp_i32()
 llvm.func @return_s_sp_i32() -> !llvm.struct<(struct<packed (i32)>)>
@@ -161,7 +161,7 @@ llvm.func @return_s_empty() -> !llvm.struct<"empty", ()>
 // CHECK: declare %opaque
 llvm.func @return_s_opaque() -> !llvm.struct<"opaque", opaque>
 // CHECK: declare %long
-llvm.func @return_s_long() -> !llvm.struct<"long", (i32, struct<(i32, i1)>, float, ptr<func<void ()>>)>
+llvm.func @return_s_long() -> !llvm.struct<"long", (i32, struct<(i32, i1)>, f32, ptr<func<void ()>>)>
 // CHECK: declare %self-recursive
 llvm.func @return_s_self_recursive() -> !llvm.struct<"self-recursive", (ptr<struct<"self-recursive">>)>
 // CHECK: declare %unpacked

diff  --git a/mlir/test/Target/llvmir.mlir b/mlir/test/Target/llvmir.mlir
index 223d17172207..5a686bfdee6e 100644
--- a/mlir/test/Target/llvmir.mlir
+++ b/mlir/test/Target/llvmir.mlir
@@ -13,10 +13,10 @@ llvm.mlir.global internal @int_global_array(dense<62> : vector<3xi32>) : !llvm.a
 llvm.mlir.global internal @i32_global_addr_space(62: i32) {addr_space = 7 : i32} : i32
 
 // CHECK: @float_global = internal global float 0.000000e+00
-llvm.mlir.global internal @float_global(0.0: f32) : !llvm.float
+llvm.mlir.global internal @float_global(0.0: f32) : f32
 
 // CHECK: @float_global_array = internal global [1 x float] [float -5.000000e+00]
-llvm.mlir.global internal @float_global_array(dense<[-5.0]> : vector<1xf32>) : !llvm.array<1 x float>
+llvm.mlir.global internal @float_global_array(dense<[-5.0]> : vector<1xf32>) : !llvm.array<1 x f32>
 
 // CHECK: @string_const = internal constant [6 x i8] c"foobar"
 llvm.mlir.global internal constant @string_const("foobar") : !llvm.array<6 x i8>
@@ -414,12 +414,12 @@ llvm.func @memref_alloc() {
   %0 = llvm.mlir.constant(10 : index) : i64
   %1 = llvm.mlir.constant(10 : index) : i64
   %2 = llvm.mul %0, %1 : i64
-  %3 = llvm.mlir.undef : !llvm.struct<(ptr<float>)>
+  %3 = llvm.mlir.undef : !llvm.struct<(ptr<f32>)>
   %4 = llvm.mlir.constant(4 : index) : i64
   %5 = llvm.mul %2, %4 : i64
   %6 = llvm.call @malloc(%5) : (i64) -> !llvm.ptr<i8>
-  %7 = llvm.bitcast %6 : !llvm.ptr<i8> to !llvm.ptr<float>
-  %8 = llvm.insertvalue %7, %3[0] : !llvm.struct<(ptr<float>)>
+  %7 = llvm.bitcast %6 : !llvm.ptr<i8> to !llvm.ptr<f32>
+  %8 = llvm.insertvalue %7, %3[0] : !llvm.struct<(ptr<f32>)>
 // CHECK-NEXT: ret void
   llvm.return
 }
@@ -434,13 +434,13 @@ llvm.func @store_load_static() {
 // CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
 // CHECK-NEXT: %{{[0-9]+}} = insertvalue { float* } undef, float* %{{[0-9]+}}, 0
   %0 = llvm.mlir.constant(10 : index) : i64
-  %1 = llvm.mlir.undef : !llvm.struct<(ptr<float>)>
+  %1 = llvm.mlir.undef : !llvm.struct<(ptr<f32>)>
   %2 = llvm.mlir.constant(4 : index) : i64
   %3 = llvm.mul %0, %2 : i64
   %4 = llvm.call @malloc(%3) : (i64) -> !llvm.ptr<i8>
-  %5 = llvm.bitcast %4 : !llvm.ptr<i8> to !llvm.ptr<float>
-  %6 = llvm.insertvalue %5, %1[0] : !llvm.struct<(ptr<float>)>
-  %7 = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float
+  %5 = llvm.bitcast %4 : !llvm.ptr<i8> to !llvm.ptr<f32>
+  %6 = llvm.insertvalue %5, %1[0] : !llvm.struct<(ptr<f32>)>
+  %7 = llvm.mlir.constant(1.000000e+00 : f32) : f32
   llvm.br ^bb1
 ^bb1:   // pred: ^bb0
   %8 = llvm.mlir.constant(0 : index) : i64
@@ -457,9 +457,9 @@ llvm.func @store_load_static() {
 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
 // CHECK-NEXT: store float 1.000000e+00, float* %{{[0-9]+}}
   %12 = llvm.mlir.constant(10 : index) : i64
-  %13 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<float>)>
-  %14 = llvm.getelementptr %13[%10] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-  llvm.store %7, %14 : !llvm.ptr<float>
+  %13 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<f32>)>
+  %14 = llvm.getelementptr %13[%10] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+  llvm.store %7, %14 : !llvm.ptr<f32>
   %15 = llvm.mlir.constant(1 : index) : i64
 // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
   %16 = llvm.add %10, %15 : i64
@@ -482,9 +482,9 @@ llvm.func @store_load_static() {
 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
 // CHECK-NEXT: %{{[0-9]+}} = load float, float* %{{[0-9]+}}
   %21 = llvm.mlir.constant(10 : index) : i64
-  %22 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<float>)>
-  %23 = llvm.getelementptr %22[%19] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-  %24 = llvm.load %23 : !llvm.ptr<float>
+  %22 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<f32>)>
+  %23 = llvm.getelementptr %22[%19] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+  %24 = llvm.load %23 : !llvm.ptr<f32>
   %25 = llvm.mlir.constant(1 : index) : i64
 // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
   %26 = llvm.add %19, %25 : i64
@@ -502,14 +502,14 @@ llvm.func @store_load_dynamic(%arg0: i64) {
 // CHECK-NEXT: %{{[0-9]+}} = bitcast i8* %{{[0-9]+}} to float*
 // CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } undef, float* %{{[0-9]+}}, 0
 // CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
-  %0 = llvm.mlir.undef : !llvm.struct<(ptr<float>, i64)>
+  %0 = llvm.mlir.undef : !llvm.struct<(ptr<f32>, i64)>
   %1 = llvm.mlir.constant(4 : index) : i64
   %2 = llvm.mul %arg0, %1 : i64
   %3 = llvm.call @malloc(%2) : (i64) -> !llvm.ptr<i8>
-  %4 = llvm.bitcast %3 : !llvm.ptr<i8> to !llvm.ptr<float>
-  %5 = llvm.insertvalue %4, %0[0] : !llvm.struct<(ptr<float>, i64)>
-  %6 = llvm.insertvalue %arg0, %5[1] : !llvm.struct<(ptr<float>, i64)>
-  %7 = llvm.mlir.constant(1.000000e+00 : f32) : !llvm.float
+  %4 = llvm.bitcast %3 : !llvm.ptr<i8> to !llvm.ptr<f32>
+  %5 = llvm.insertvalue %4, %0[0] : !llvm.struct<(ptr<f32>, i64)>
+  %6 = llvm.insertvalue %arg0, %5[1] : !llvm.struct<(ptr<f32>, i64)>
+  %7 = llvm.mlir.constant(1.000000e+00 : f32) : f32
 // CHECK-NEXT: br label %{{[0-9]+}}
   llvm.br ^bb1
 ^bb1:   // pred: ^bb0
@@ -526,10 +526,10 @@ llvm.func @store_load_dynamic(%arg0: i64) {
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0
 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
 // CHECK-NEXT: store float 1.000000e+00, float* %{{[0-9]+}}
-  %11 = llvm.extractvalue %6[1] : !llvm.struct<(ptr<float>, i64)>
-  %12 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<float>, i64)>
-  %13 = llvm.getelementptr %12[%9] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-  llvm.store %7, %13 : !llvm.ptr<float>
+  %11 = llvm.extractvalue %6[1] : !llvm.struct<(ptr<f32>, i64)>
+  %12 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<f32>, i64)>
+  %13 = llvm.getelementptr %12[%9] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+  llvm.store %7, %13 : !llvm.ptr<f32>
   %14 = llvm.mlir.constant(1 : index) : i64
 // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
   %15 = llvm.add %9, %14 : i64
@@ -551,10 +551,10 @@ llvm.func @store_load_dynamic(%arg0: i64) {
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0
 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
 // CHECK-NEXT: %{{[0-9]+}} = load float, float* %{{[0-9]+}}
-  %19 = llvm.extractvalue %6[1] : !llvm.struct<(ptr<float>, i64)>
-  %20 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<float>, i64)>
-  %21 = llvm.getelementptr %20[%17] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-  %22 = llvm.load %21 : !llvm.ptr<float>
+  %19 = llvm.extractvalue %6[1] : !llvm.struct<(ptr<f32>, i64)>
+  %20 = llvm.extractvalue %6[0] : !llvm.struct<(ptr<f32>, i64)>
+  %21 = llvm.getelementptr %20[%17] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+  %22 = llvm.load %21 : !llvm.ptr<f32>
   %23 = llvm.mlir.constant(1 : index) : i64
 // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
   %24 = llvm.add %17, %23 : i64
@@ -582,14 +582,14 @@ llvm.func @store_load_mixed(%arg0: i64) {
   %3 = llvm.mul %1, %arg0 : i64
   %4 = llvm.mul %3, %2 : i64
   %5 = llvm.mul %4, %0 : i64
-  %6 = llvm.mlir.undef : !llvm.struct<(ptr<float>, i64, i64)>
+  %6 = llvm.mlir.undef : !llvm.struct<(ptr<f32>, i64, i64)>
   %7 = llvm.mlir.constant(4 : index) : i64
   %8 = llvm.mul %5, %7 : i64
   %9 = llvm.call @malloc(%8) : (i64) -> !llvm.ptr<i8>
-  %10 = llvm.bitcast %9 : !llvm.ptr<i8> to !llvm.ptr<float>
-  %11 = llvm.insertvalue %10, %6[0] : !llvm.struct<(ptr<float>, i64, i64)>
-  %12 = llvm.insertvalue %arg0, %11[1] : !llvm.struct<(ptr<float>, i64, i64)>
-  %13 = llvm.insertvalue %0, %12[2] : !llvm.struct<(ptr<float>, i64, i64)>
+  %10 = llvm.bitcast %9 : !llvm.ptr<i8> to !llvm.ptr<f32>
+  %11 = llvm.insertvalue %10, %6[0] : !llvm.struct<(ptr<f32>, i64, i64)>
+  %12 = llvm.insertvalue %arg0, %11[1] : !llvm.struct<(ptr<f32>, i64, i64)>
+  %13 = llvm.insertvalue %0, %12[2] : !llvm.struct<(ptr<f32>, i64, i64)>
 
 // CHECK-NEXT: %{{[0-9]+}} = call i64 @get_index()
 // CHECK-NEXT: %{{[0-9]+}} = call i64 @get_index()
@@ -597,7 +597,7 @@ llvm.func @store_load_mixed(%arg0: i64) {
   %15 = llvm.mlir.constant(2 : index) : i64
   %16 = llvm.call @get_index() : () -> i64
   %17 = llvm.call @get_index() : () -> i64
-  %18 = llvm.mlir.constant(4.200000e+01 : f32) : !llvm.float
+  %18 = llvm.mlir.constant(4.200000e+01 : f32) : f32
   %19 = llvm.mlir.constant(2 : index) : i64
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 1
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 2
@@ -610,18 +610,18 @@ llvm.func @store_load_mixed(%arg0: i64) {
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 0
 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
 // CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
-  %20 = llvm.extractvalue %13[1] : !llvm.struct<(ptr<float>, i64, i64)>
+  %20 = llvm.extractvalue %13[1] : !llvm.struct<(ptr<f32>, i64, i64)>
   %21 = llvm.mlir.constant(4 : index) : i64
-  %22 = llvm.extractvalue %13[2] : !llvm.struct<(ptr<float>, i64, i64)>
+  %22 = llvm.extractvalue %13[2] : !llvm.struct<(ptr<f32>, i64, i64)>
   %23 = llvm.mul %14, %20 : i64
   %24 = llvm.add %23, %15 : i64
   %25 = llvm.mul %24, %21 : i64
   %26 = llvm.add %25, %16 : i64
   %27 = llvm.mul %26, %22 : i64
   %28 = llvm.add %27, %17 : i64
-  %29 = llvm.extractvalue %13[0] : !llvm.struct<(ptr<float>, i64, i64)>
-  %30 = llvm.getelementptr %29[%28] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-  llvm.store %18, %30 : !llvm.ptr<float>
+  %29 = llvm.extractvalue %13[0] : !llvm.struct<(ptr<f32>, i64, i64)>
+  %30 = llvm.getelementptr %29[%28] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+  llvm.store %18, %30 : !llvm.ptr<f32>
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 1
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64, i64 } %{{[0-9]+}}, 2
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, %{{[0-9]+}}
@@ -634,43 +634,43 @@ llvm.func @store_load_mixed(%arg0: i64) {
 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
 // CHECK-NEXT: %{{[0-9]+}} = load float, float* %{{[0-9]+}}
   %31 = llvm.mlir.constant(2 : index) : i64
-  %32 = llvm.extractvalue %13[1] : !llvm.struct<(ptr<float>, i64, i64)>
+  %32 = llvm.extractvalue %13[1] : !llvm.struct<(ptr<f32>, i64, i64)>
   %33 = llvm.mlir.constant(4 : index) : i64
-  %34 = llvm.extractvalue %13[2] : !llvm.struct<(ptr<float>, i64, i64)>
+  %34 = llvm.extractvalue %13[2] : !llvm.struct<(ptr<f32>, i64, i64)>
   %35 = llvm.mul %17, %32 : i64
   %36 = llvm.add %35, %16 : i64
   %37 = llvm.mul %36, %33 : i64
   %38 = llvm.add %37, %15 : i64
   %39 = llvm.mul %38, %34 : i64
   %40 = llvm.add %39, %14 : i64
-  %41 = llvm.extractvalue %13[0] : !llvm.struct<(ptr<float>, i64, i64)>
-  %42 = llvm.getelementptr %41[%40] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-  %43 = llvm.load %42 : !llvm.ptr<float>
+  %41 = llvm.extractvalue %13[0] : !llvm.struct<(ptr<f32>, i64, i64)>
+  %42 = llvm.getelementptr %41[%40] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+  %43 = llvm.load %42 : !llvm.ptr<f32>
 // CHECK-NEXT: ret void
   llvm.return
 }
 
 // CHECK-LABEL: define { float*, i64 } @memref_args_rets({ float* } {{%.*}}, { float*, i64 } {{%.*}}, { float*, i64 } {{%.*}})
-llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr<float>)>, %arg1: !llvm.struct<(ptr<float>, i64)>, %arg2: !llvm.struct<(ptr<float>, i64)>) -> !llvm.struct<(ptr<float>, i64)> {
+llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr<f32>)>, %arg1: !llvm.struct<(ptr<f32>, i64)>, %arg2: !llvm.struct<(ptr<f32>, i64)>) -> !llvm.struct<(ptr<f32>, i64)> {
   %0 = llvm.mlir.constant(7 : index) : i64
 // CHECK-NEXT: %{{[0-9]+}} = call i64 @get_index()
   %1 = llvm.call @get_index() : () -> i64
-  %2 = llvm.mlir.constant(4.200000e+01 : f32) : !llvm.float
+  %2 = llvm.mlir.constant(4.200000e+01 : f32) : f32
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float* } %{{[0-9]+}}, 0
 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 7
 // CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
   %3 = llvm.mlir.constant(10 : index) : i64
-  %4 = llvm.extractvalue %arg0[0] : !llvm.struct<(ptr<float>)>
-  %5 = llvm.getelementptr %4[%0] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-  llvm.store %2, %5 : !llvm.ptr<float>
+  %4 = llvm.extractvalue %arg0[0] : !llvm.struct<(ptr<f32>)>
+  %5 = llvm.getelementptr %4[%0] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+  llvm.store %2, %5 : !llvm.ptr<f32>
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 1
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 0
 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 7
 // CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
-  %6 = llvm.extractvalue %arg1[1] : !llvm.struct<(ptr<float>, i64)>
-  %7 = llvm.extractvalue %arg1[0] : !llvm.struct<(ptr<float>, i64)>
-  %8 = llvm.getelementptr %7[%0] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-  llvm.store %2, %8 : !llvm.ptr<float>
+  %6 = llvm.extractvalue %arg1[1] : !llvm.struct<(ptr<f32>, i64)>
+  %7 = llvm.extractvalue %arg1[0] : !llvm.struct<(ptr<f32>, i64)>
+  %8 = llvm.getelementptr %7[%0] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+  llvm.store %2, %8 : !llvm.ptr<f32>
 // CHECK-NEXT: %{{[0-9]+}} = extractvalue { float*, i64 } %{{[0-9]+}}, 1
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 7, %{{[0-9]+}}
 // CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, %{{[0-9]+}}
@@ -678,12 +678,12 @@ llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr<float>)>, %arg1: !llvm.stru
 // CHECK-NEXT: %{{[0-9]+}} = getelementptr float, float* %{{[0-9]+}}, i64 %{{[0-9]+}}
 // CHECK-NEXT: store float 4.200000e+01, float* %{{[0-9]+}}
   %9 = llvm.mlir.constant(10 : index) : i64
-  %10 = llvm.extractvalue %arg2[1] : !llvm.struct<(ptr<float>, i64)>
+  %10 = llvm.extractvalue %arg2[1] : !llvm.struct<(ptr<f32>, i64)>
   %11 = llvm.mul %0, %10 : i64
   %12 = llvm.add %11, %1 : i64
-  %13 = llvm.extractvalue %arg2[0] : !llvm.struct<(ptr<float>, i64)>
-  %14 = llvm.getelementptr %13[%12] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-  llvm.store %2, %14 : !llvm.ptr<float>
+  %13 = llvm.extractvalue %arg2[0] : !llvm.struct<(ptr<f32>, i64)>
+  %14 = llvm.getelementptr %13[%12] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+  llvm.store %2, %14 : !llvm.ptr<f32>
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 10, %{{[0-9]+}}
 // CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
 // CHECK-NEXT: %{{[0-9]+}} = call i8* @malloc(i64 %{{[0-9]+}})
@@ -692,28 +692,28 @@ llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr<float>)>, %arg1: !llvm.stru
 // CHECK-NEXT: %{{[0-9]+}} = insertvalue { float*, i64 } %{{[0-9]+}}, i64 %{{[0-9]+}}, 1
   %15 = llvm.mlir.constant(10 : index) : i64
   %16 = llvm.mul %15, %1 : i64
-  %17 = llvm.mlir.undef : !llvm.struct<(ptr<float>, i64)>
+  %17 = llvm.mlir.undef : !llvm.struct<(ptr<f32>, i64)>
   %18 = llvm.mlir.constant(4 : index) : i64
   %19 = llvm.mul %16, %18 : i64
   %20 = llvm.call @malloc(%19) : (i64) -> !llvm.ptr<i8>
-  %21 = llvm.bitcast %20 : !llvm.ptr<i8> to !llvm.ptr<float>
-  %22 = llvm.insertvalue %21, %17[0] : !llvm.struct<(ptr<float>, i64)>
-  %23 = llvm.insertvalue %1, %22[1] : !llvm.struct<(ptr<float>, i64)>
+  %21 = llvm.bitcast %20 : !llvm.ptr<i8> to !llvm.ptr<f32>
+  %22 = llvm.insertvalue %21, %17[0] : !llvm.struct<(ptr<f32>, i64)>
+  %23 = llvm.insertvalue %1, %22[1] : !llvm.struct<(ptr<f32>, i64)>
 // CHECK-NEXT: ret { float*, i64 } %{{[0-9]+}}
-  llvm.return %23 : !llvm.struct<(ptr<float>, i64)>
+  llvm.return %23 : !llvm.struct<(ptr<f32>, i64)>
 }
 
 
 // CHECK-LABEL: define i64 @memref_dim({ float*, i64, i64 } {{%.*}})
-llvm.func @memref_dim(%arg0: !llvm.struct<(ptr<float>, i64, i64)>) -> i64 {
+llvm.func @memref_dim(%arg0: !llvm.struct<(ptr<f32>, i64, i64)>) -> i64 {
 // Expecting this to create an LLVM constant.
   %0 = llvm.mlir.constant(42 : index) : i64
 // CHECK-NEXT: %2 = extractvalue { float*, i64, i64 } %0, 1
-  %1 = llvm.extractvalue %arg0[1] : !llvm.struct<(ptr<float>, i64, i64)>
+  %1 = llvm.extractvalue %arg0[1] : !llvm.struct<(ptr<f32>, i64, i64)>
 // Expecting this to create an LLVM constant.
   %2 = llvm.mlir.constant(10 : index) : i64
 // CHECK-NEXT: %3 = extractvalue { float*, i64, i64 } %0, 2
-  %3 = llvm.extractvalue %arg0[2] : !llvm.struct<(ptr<float>, i64, i64)>
+  %3 = llvm.extractvalue %arg0[2] : !llvm.struct<(ptr<f32>, i64, i64)>
 // Checking that the constant for d0 has been created.
 // CHECK-NEXT: %4 = add i64 42, %2
   %4 = llvm.add %0, %1 : i64
@@ -727,23 +727,23 @@ llvm.func @memref_dim(%arg0: !llvm.struct<(ptr<float>, i64, i64)>) -> i64 {
 }
 
 llvm.func @get_i64() -> i64
-llvm.func @get_f32() -> !llvm.float
-llvm.func @get_memref() -> !llvm.struct<(ptr<float>, i64, i64)>
+llvm.func @get_f32() -> f32
+llvm.func @get_memref() -> !llvm.struct<(ptr<f32>, i64, i64)>
 
 // CHECK-LABEL: define { i64, float, { float*, i64, i64 } } @multireturn()
-llvm.func @multireturn() -> !llvm.struct<(i64, float, struct<(ptr<float>, i64, i64)>)> {
+llvm.func @multireturn() -> !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)> {
   %0 = llvm.call @get_i64() : () -> i64
-  %1 = llvm.call @get_f32() : () -> !llvm.float
-  %2 = llvm.call @get_memref() : () -> !llvm.struct<(ptr<float>, i64, i64)>
+  %1 = llvm.call @get_f32() : () -> f32
+  %2 = llvm.call @get_memref() : () -> !llvm.struct<(ptr<f32>, i64, i64)>
 // CHECK:        %{{[0-9]+}} = insertvalue { i64, float, { float*, i64, i64 } } undef, i64 %{{[0-9]+}}, 0
 // CHECK-NEXT:   %{{[0-9]+}} = insertvalue { i64, float, { float*, i64, i64 } } %{{[0-9]+}}, float %{{[0-9]+}}, 1
 // CHECK-NEXT:   %{{[0-9]+}} = insertvalue { i64, float, { float*, i64, i64 } } %{{[0-9]+}}, { float*, i64, i64 } %{{[0-9]+}}, 2
 // CHECK-NEXT:   ret { i64, float, { float*, i64, i64 } } %{{[0-9]+}}
-  %3 = llvm.mlir.undef : !llvm.struct<(i64, float, struct<(ptr<float>, i64, i64)>)>
-  %4 = llvm.insertvalue %0, %3[0] : !llvm.struct<(i64, float, struct<(ptr<float>, i64, i64)>)>
-  %5 = llvm.insertvalue %1, %4[1] : !llvm.struct<(i64, float, struct<(ptr<float>, i64, i64)>)>
-  %6 = llvm.insertvalue %2, %5[2] : !llvm.struct<(i64, float, struct<(ptr<float>, i64, i64)>)>
-  llvm.return %6 : !llvm.struct<(i64, float, struct<(ptr<float>, i64, i64)>)>
+  %3 = llvm.mlir.undef : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
+  %4 = llvm.insertvalue %0, %3[0] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
+  %5 = llvm.insertvalue %1, %4[1] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
+  %6 = llvm.insertvalue %2, %5[2] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
+  llvm.return %6 : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
 }
 
 
@@ -753,41 +753,41 @@ llvm.func @multireturn_caller() {
 // CHECK-NEXT:   [[ret0:%[0-9]+]] = extractvalue { i64, float, { float*, i64, i64 } } %1, 0
 // CHECK-NEXT:   [[ret1:%[0-9]+]] = extractvalue { i64, float, { float*, i64, i64 } } %1, 1
 // CHECK-NEXT:   [[ret2:%[0-9]+]] = extractvalue { i64, float, { float*, i64, i64 } } %1, 2
-  %0 = llvm.call @multireturn() : () -> !llvm.struct<(i64, float, struct<(ptr<float>, i64, i64)>)>
-  %1 = llvm.extractvalue %0[0] : !llvm.struct<(i64, float, struct<(ptr<float>, i64, i64)>)>
-  %2 = llvm.extractvalue %0[1] : !llvm.struct<(i64, float, struct<(ptr<float>, i64, i64)>)>
-  %3 = llvm.extractvalue %0[2] : !llvm.struct<(i64, float, struct<(ptr<float>, i64, i64)>)>
+  %0 = llvm.call @multireturn() : () -> !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
+  %1 = llvm.extractvalue %0[0] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
+  %2 = llvm.extractvalue %0[1] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
+  %3 = llvm.extractvalue %0[2] : !llvm.struct<(i64, f32, struct<(ptr<f32>, i64, i64)>)>
   %4 = llvm.mlir.constant(42) : i64
 // CHECK:   add i64 [[ret0]], 42
   %5 = llvm.add %1, %4 : i64
-  %6 = llvm.mlir.constant(4.200000e+01 : f32) : !llvm.float
+  %6 = llvm.mlir.constant(4.200000e+01 : f32) : f32
 // CHECK:   fadd float [[ret1]], 4.200000e+01
-  %7 = llvm.fadd %2, %6 : !llvm.float
+  %7 = llvm.fadd %2, %6 : f32
   %8 = llvm.mlir.constant(0 : index) : i64
   %9 = llvm.mlir.constant(42 : index) : i64
 // CHECK:   extractvalue { float*, i64, i64 } [[ret2]], 0
-  %10 = llvm.extractvalue %3[1] : !llvm.struct<(ptr<float>, i64, i64)>
+  %10 = llvm.extractvalue %3[1] : !llvm.struct<(ptr<f32>, i64, i64)>
   %11 = llvm.mlir.constant(10 : index) : i64
-  %12 = llvm.extractvalue %3[2] : !llvm.struct<(ptr<float>, i64, i64)>
+  %12 = llvm.extractvalue %3[2] : !llvm.struct<(ptr<f32>, i64, i64)>
   %13 = llvm.mul %8, %10 : i64
   %14 = llvm.add %13, %8 : i64
   %15 = llvm.mul %14, %11 : i64
   %16 = llvm.add %15, %8 : i64
   %17 = llvm.mul %16, %12 : i64
   %18 = llvm.add %17, %8 : i64
-  %19 = llvm.extractvalue %3[0] : !llvm.struct<(ptr<float>, i64, i64)>
-  %20 = llvm.getelementptr %19[%18] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-  %21 = llvm.load %20 : !llvm.ptr<float>
+  %19 = llvm.extractvalue %3[0] : !llvm.struct<(ptr<f32>, i64, i64)>
+  %20 = llvm.getelementptr %19[%18] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+  %21 = llvm.load %20 : !llvm.ptr<f32>
   llvm.return
 }
 
 // CHECK-LABEL: define <4 x float> @vector_ops(<4 x float> {{%.*}}, <4 x i1> {{%.*}}, <4 x i64> {{%.*}})
-llvm.func @vector_ops(%arg0: !llvm.vec<4 x float>, %arg1: !llvm.vec<4 x i1>, %arg2: !llvm.vec<4 x i64>) -> !llvm.vec<4 x float> {
-  %0 = llvm.mlir.constant(dense<4.200000e+01> : vector<4xf32>) : !llvm.vec<4 x float>
+llvm.func @vector_ops(%arg0: !llvm.vec<4 x f32>, %arg1: !llvm.vec<4 x i1>, %arg2: !llvm.vec<4 x i64>) -> !llvm.vec<4 x f32> {
+  %0 = llvm.mlir.constant(dense<4.200000e+01> : vector<4xf32>) : !llvm.vec<4 x f32>
 // CHECK-NEXT: %4 = fadd <4 x float> %0, <float 4.200000e+01, float 4.200000e+01, float 4.200000e+01, float 4.200000e+01>
-  %1 = llvm.fadd %arg0, %0 : !llvm.vec<4 x float>
+  %1 = llvm.fadd %arg0, %0 : !llvm.vec<4 x f32>
 // CHECK-NEXT: %5 = select <4 x i1> %1, <4 x float> %4, <4 x float> %0
-  %2 = llvm.select %arg1, %1, %arg0 : !llvm.vec<4 x i1>, !llvm.vec<4 x float>
+  %2 = llvm.select %arg1, %1, %arg0 : !llvm.vec<4 x i1>, !llvm.vec<4 x f32>
 // CHECK-NEXT: %6 = sdiv <4 x i64> %2, %2
   %3 = llvm.sdiv %arg2, %arg2 : !llvm.vec<4 x i64>
 // CHECK-NEXT: %7 = udiv <4 x i64> %2, %2
@@ -797,9 +797,9 @@ llvm.func @vector_ops(%arg0: !llvm.vec<4 x float>, %arg1: !llvm.vec<4 x i1>, %ar
 // CHECK-NEXT: %9 = urem <4 x i64> %2, %2
   %6 = llvm.urem %arg2, %arg2 : !llvm.vec<4 x i64>
 // CHECK-NEXT: %10 = fdiv <4 x float> %0, <float 4.200000e+01, float 4.200000e+01, float 4.200000e+01, float 4.200000e+01>
-  %7 = llvm.fdiv %arg0, %0 : !llvm.vec<4 x float>
+  %7 = llvm.fdiv %arg0, %0 : !llvm.vec<4 x f32>
 // CHECK-NEXT: %11 = frem <4 x float> %0, <float 4.200000e+01, float 4.200000e+01, float 4.200000e+01, float 4.200000e+01>
-  %8 = llvm.frem %arg0, %0 : !llvm.vec<4 x float>
+  %8 = llvm.frem %arg0, %0 : !llvm.vec<4 x f32>
 // CHECK-NEXT: %12 = and <4 x i64> %2, %2
   %9 = llvm.and %arg2, %arg2 : !llvm.vec<4 x i64>
 // CHECK-NEXT: %13 = or <4 x i64> %2, %2
@@ -813,41 +813,41 @@ llvm.func @vector_ops(%arg0: !llvm.vec<4 x float>, %arg1: !llvm.vec<4 x i1>, %ar
 // CHECK-NEXT: %17 = ashr <4 x i64> %2, %2
   %14 = llvm.ashr %arg2, %arg2 : !llvm.vec<4 x i64>
 // CHECK-NEXT:    ret <4 x float> %4
-  llvm.return %1 : !llvm.vec<4 x float>
+  llvm.return %1 : !llvm.vec<4 x f32>
 }
 
 // CHECK-LABEL: @vector_splat_1d
-llvm.func @vector_splat_1d() -> !llvm.vec<4 x float> {
+llvm.func @vector_splat_1d() -> !llvm.vec<4 x f32> {
   // CHECK: ret <4 x float> zeroinitializer
-  %0 = llvm.mlir.constant(dense<0.000000e+00> : vector<4xf32>) : !llvm.vec<4 x float>
-  llvm.return %0 : !llvm.vec<4 x float>
+  %0 = llvm.mlir.constant(dense<0.000000e+00> : vector<4xf32>) : !llvm.vec<4 x f32>
+  llvm.return %0 : !llvm.vec<4 x f32>
 }
 
 // CHECK-LABEL: @vector_splat_2d
-llvm.func @vector_splat_2d() -> !llvm.array<4 x vec<16 x float>> {
+llvm.func @vector_splat_2d() -> !llvm.array<4 x vec<16 x f32>> {
   // CHECK: ret [4 x <16 x float>] zeroinitializer
-  %0 = llvm.mlir.constant(dense<0.000000e+00> : vector<4x16xf32>) : !llvm.array<4 x vec<16 x float>>
-  llvm.return %0 : !llvm.array<4 x vec<16 x float>>
+  %0 = llvm.mlir.constant(dense<0.000000e+00> : vector<4x16xf32>) : !llvm.array<4 x vec<16 x f32>>
+  llvm.return %0 : !llvm.array<4 x vec<16 x f32>>
 }
 
 // CHECK-LABEL: @vector_splat_3d
-llvm.func @vector_splat_3d() -> !llvm.array<4 x array<16 x vec<4 x float>>> {
+llvm.func @vector_splat_3d() -> !llvm.array<4 x array<16 x vec<4 x f32>>> {
   // CHECK: ret [4 x [16 x <4 x float>]] zeroinitializer
-  %0 = llvm.mlir.constant(dense<0.000000e+00> : vector<4x16x4xf32>) : !llvm.array<4 x array<16 x vec<4 x float>>>
-  llvm.return %0 : !llvm.array<4 x array<16 x vec<4 x float>>>
+  %0 = llvm.mlir.constant(dense<0.000000e+00> : vector<4x16x4xf32>) : !llvm.array<4 x array<16 x vec<4 x f32>>>
+  llvm.return %0 : !llvm.array<4 x array<16 x vec<4 x f32>>>
 }
 
 // CHECK-LABEL: @vector_splat_nonzero
-llvm.func @vector_splat_nonzero() -> !llvm.vec<4 x float> {
+llvm.func @vector_splat_nonzero() -> !llvm.vec<4 x f32> {
   // CHECK: ret <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
-  %0 = llvm.mlir.constant(dense<1.000000e+00> : vector<4xf32>) : !llvm.vec<4 x float>
-  llvm.return %0 : !llvm.vec<4 x float>
+  %0 = llvm.mlir.constant(dense<1.000000e+00> : vector<4xf32>) : !llvm.vec<4 x f32>
+  llvm.return %0 : !llvm.vec<4 x f32>
 }
 
 // CHECK-LABEL: @ops
-llvm.func @ops(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: i32, %arg3: i32) -> !llvm.struct<(float, i32)> {
+llvm.func @ops(%arg0: f32, %arg1: f32, %arg2: i32, %arg3: i32) -> !llvm.struct<(f32, i32)> {
 // CHECK-NEXT: fsub float %0, %1
-  %0 = llvm.fsub %arg0, %arg1 : !llvm.float
+  %0 = llvm.fsub %arg0, %arg1 : f32
 // CHECK-NEXT: %6 = sub i32 %2, %3
   %1 = llvm.sub %arg2, %arg3 : i32
 // CHECK-NEXT: %7 = icmp slt i32 %2, %6
@@ -863,14 +863,14 @@ llvm.func @ops(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: i32, %arg3: i32) -
 // CHECK-NEXT: %12 = urem i32 %2, %3
   %7 = llvm.urem %arg2, %arg3 : i32
 
-  %8 = llvm.mlir.undef : !llvm.struct<(float, i32)>
-  %9 = llvm.insertvalue %0, %8[0] : !llvm.struct<(float, i32)>
-  %10 = llvm.insertvalue %3, %9[1] : !llvm.struct<(float, i32)>
+  %8 = llvm.mlir.undef : !llvm.struct<(f32, i32)>
+  %9 = llvm.insertvalue %0, %8[0] : !llvm.struct<(f32, i32)>
+  %10 = llvm.insertvalue %3, %9[1] : !llvm.struct<(f32, i32)>
 
 // CHECK: %15 = fdiv float %0, %1
-  %11 = llvm.fdiv %arg0, %arg1 : !llvm.float
+  %11 = llvm.fdiv %arg0, %arg1 : f32
 // CHECK-NEXT: %16 = frem float %0, %1
-  %12 = llvm.frem %arg0, %arg1 : !llvm.float
+  %12 = llvm.frem %arg0, %arg1 : f32
 
 // CHECK-NEXT: %17 = and i32 %2, %3
   %13 = llvm.and %arg2, %arg3 : i32
@@ -886,9 +886,9 @@ llvm.func @ops(%arg0: !llvm.float, %arg1: !llvm.float, %arg2: i32, %arg3: i32) -
   %18 = llvm.ashr %arg2, %arg3 : i32
 
 // CHECK-NEXT: fneg float %0
-  %19 = llvm.fneg %arg0 : !llvm.float
+  %19 = llvm.fneg %arg0 : f32
 
-  llvm.return %10 : !llvm.struct<(float, i32)>
+  llvm.return %10 : !llvm.struct<(f32, i32)>
 }
 
 //
@@ -905,9 +905,9 @@ llvm.func @indirect_const_call(%arg0: i64) {
 }
 
 // CHECK-LABEL: define i32 @indirect_call(i32 (float)* {{%.*}}, float {{%.*}})
-llvm.func @indirect_call(%arg0: !llvm.ptr<func<i32 (float)>>, %arg1: !llvm.float) -> i32 {
+llvm.func @indirect_call(%arg0: !llvm.ptr<func<i32 (f32)>>, %arg1: f32) -> i32 {
 // CHECK-NEXT:  %3 = call i32 %0(float %1)
-  %0 = llvm.call %arg0(%arg1) : (!llvm.float) -> i32
+  %0 = llvm.call %arg0(%arg1) : (f32) -> i32
 // CHECK-NEXT:  ret i32 %3
   llvm.return %0 : i32
 }
@@ -935,12 +935,12 @@ llvm.func @cond_br_arguments(%arg0: i1, %arg1: i1) {
 }
 
 // CHECK-LABEL: define void @llvm_noalias(float* noalias {{%*.}})
-llvm.func @llvm_noalias(%arg0: !llvm.ptr<float> {llvm.noalias = true}) {
+llvm.func @llvm_noalias(%arg0: !llvm.ptr<f32> {llvm.noalias = true}) {
   llvm.return
 }
 
 // CHECK-LABEL: define void @llvm_align(float* align 4 {{%*.}})
-llvm.func @llvm_align(%arg0: !llvm.ptr<float> {llvm.align = 4}) {
+llvm.func @llvm_align(%arg0: !llvm.ptr<f32> {llvm.align = 4}) {
   llvm.return
 }
 
@@ -960,10 +960,10 @@ llvm.func @fpconversion(%arg0 : i32) -> i32 {
 // CHECK-NEXT: %3 = fptosi float %2 to i32
 // CHECK-NEXT: %4 = uitofp i32 %3 to float
 // CHECK-NEXT: %5 = fptoui float %4 to i32
-  %1 = llvm.sitofp %arg0 : i32 to !llvm.float
-  %2 = llvm.fptosi %1 : !llvm.float to i32
-  %3 = llvm.uitofp %2 : i32 to !llvm.float
-  %4 = llvm.fptoui %3 : !llvm.float to i32
+  %1 = llvm.sitofp %arg0 : i32 to f32
+  %2 = llvm.fptosi %1 : f32 to i32
+  %3 = llvm.uitofp %2 : i32 to f32
+  %4 = llvm.fptoui %3 : f32 to i32
   llvm.return %4 : i32
 }
 
@@ -986,7 +986,7 @@ llvm.func @noreach() {
 }
 
 // CHECK-LABEL: define void @fcmp
-llvm.func @fcmp(%arg0: !llvm.float, %arg1: !llvm.float) {
+llvm.func @fcmp(%arg0: f32, %arg1: f32) {
   // CHECK: fcmp oeq float %0, %1
   // CHECK-NEXT: fcmp ogt float %0, %1
   // CHECK-NEXT: fcmp oge float %0, %1
@@ -1001,40 +1001,40 @@ llvm.func @fcmp(%arg0: !llvm.float, %arg1: !llvm.float) {
   // CHECK-NEXT: fcmp ule float %0, %1
   // CHECK-NEXT: fcmp une float %0, %1
   // CHECK-NEXT: fcmp uno float %0, %1
-  %0 = llvm.fcmp "oeq" %arg0, %arg1 : !llvm.float
-  %1 = llvm.fcmp "ogt" %arg0, %arg1 : !llvm.float
-  %2 = llvm.fcmp "oge" %arg0, %arg1 : !llvm.float
-  %3 = llvm.fcmp "olt" %arg0, %arg1 : !llvm.float
-  %4 = llvm.fcmp "ole" %arg0, %arg1 : !llvm.float
-  %5 = llvm.fcmp "one" %arg0, %arg1 : !llvm.float
-  %6 = llvm.fcmp "ord" %arg0, %arg1 : !llvm.float
-  %7 = llvm.fcmp "ueq" %arg0, %arg1 : !llvm.float
-  %8 = llvm.fcmp "ugt" %arg0, %arg1 : !llvm.float
-  %9 = llvm.fcmp "uge" %arg0, %arg1 : !llvm.float
-  %10 = llvm.fcmp "ult" %arg0, %arg1 : !llvm.float
-  %11 = llvm.fcmp "ule" %arg0, %arg1 : !llvm.float
-  %12 = llvm.fcmp "une" %arg0, %arg1 : !llvm.float
-  %13 = llvm.fcmp "uno" %arg0, %arg1 : !llvm.float
+  %0 = llvm.fcmp "oeq" %arg0, %arg1 : f32
+  %1 = llvm.fcmp "ogt" %arg0, %arg1 : f32
+  %2 = llvm.fcmp "oge" %arg0, %arg1 : f32
+  %3 = llvm.fcmp "olt" %arg0, %arg1 : f32
+  %4 = llvm.fcmp "ole" %arg0, %arg1 : f32
+  %5 = llvm.fcmp "one" %arg0, %arg1 : f32
+  %6 = llvm.fcmp "ord" %arg0, %arg1 : f32
+  %7 = llvm.fcmp "ueq" %arg0, %arg1 : f32
+  %8 = llvm.fcmp "ugt" %arg0, %arg1 : f32
+  %9 = llvm.fcmp "uge" %arg0, %arg1 : f32
+  %10 = llvm.fcmp "ult" %arg0, %arg1 : f32
+  %11 = llvm.fcmp "ule" %arg0, %arg1 : f32
+  %12 = llvm.fcmp "une" %arg0, %arg1 : f32
+  %13 = llvm.fcmp "uno" %arg0, %arg1 : f32
   llvm.return
 }
 
 // CHECK-LABEL: @vect
-llvm.func @vect(%arg0: !llvm.vec<4 x float>, %arg1: i32, %arg2: !llvm.float) {
+llvm.func @vect(%arg0: !llvm.vec<4 x f32>, %arg1: i32, %arg2: f32) {
   // CHECK-NEXT: extractelement <4 x float> {{.*}}, i32
   // CHECK-NEXT: insertelement <4 x float> {{.*}}, float %2, i32
   // CHECK-NEXT: shufflevector <4 x float> {{.*}}, <4 x float> {{.*}}, <5 x i32> <i32 0, i32 0, i32 0, i32 0, i32 7>
-  %0 = llvm.extractelement %arg0[%arg1 : i32] : !llvm.vec<4 x float>
-  %1 = llvm.insertelement %arg2, %arg0[%arg1 : i32] : !llvm.vec<4 x float>
-  %2 = llvm.shufflevector %arg0, %arg0 [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : !llvm.vec<4 x float>, !llvm.vec<4 x float>
+  %0 = llvm.extractelement %arg0[%arg1 : i32] : !llvm.vec<4 x f32>
+  %1 = llvm.insertelement %arg2, %arg0[%arg1 : i32] : !llvm.vec<4 x f32>
+  %2 = llvm.shufflevector %arg0, %arg0 [0 : i32, 0 : i32, 0 : i32, 0 : i32, 7 : i32] : !llvm.vec<4 x f32>, !llvm.vec<4 x f32>
   llvm.return
 }
 
 // CHECK-LABEL: @vect_i64idx
-llvm.func @vect_i64idx(%arg0: !llvm.vec<4 x float>, %arg1: i64, %arg2: !llvm.float) {
+llvm.func @vect_i64idx(%arg0: !llvm.vec<4 x f32>, %arg1: i64, %arg2: f32) {
   // CHECK-NEXT: extractelement <4 x float> {{.*}}, i64
   // CHECK-NEXT: insertelement <4 x float> {{.*}}, float %2, i64
-  %0 = llvm.extractelement %arg0[%arg1 : i64] : !llvm.vec<4 x float>
-  %1 = llvm.insertelement %arg2, %arg0[%arg1 : i64] : !llvm.vec<4 x float>
+  %0 = llvm.extractelement %arg0[%arg1 : i64] : !llvm.vec<4 x f32>
+  %1 = llvm.insertelement %arg2, %arg0[%arg1 : i64] : !llvm.vec<4 x f32>
   llvm.return
 }
 
@@ -1050,20 +1050,20 @@ llvm.func @alloca(%size : i64) {
 }
 
 // CHECK-LABEL: @constants
-llvm.func @constants() -> !llvm.vec<4 x float> {
+llvm.func @constants() -> !llvm.vec<4 x f32> {
   // CHECK: ret <4 x float> <float 4.2{{0*}}e+01, float 0.{{0*}}e+00, float 0.{{0*}}e+00, float 0.{{0*}}e+00>
-  %0 = llvm.mlir.constant(sparse<[[0]], [4.2e+01]> : vector<4xf32>) : !llvm.vec<4 x float>
-  llvm.return %0 : !llvm.vec<4 x float>
+  %0 = llvm.mlir.constant(sparse<[[0]], [4.2e+01]> : vector<4xf32>) : !llvm.vec<4 x f32>
+  llvm.return %0 : !llvm.vec<4 x f32>
 }
 
 // CHECK-LABEL: @fp_casts
-llvm.func @fp_casts(%fp1 : !llvm.float, %fp2 : !llvm.double) -> i16 {
+llvm.func @fp_casts(%fp1 : f32, %fp2 : f64) -> i16 {
 // CHECK:    fptrunc double {{.*}} to float
-  %a = llvm.fptrunc %fp2 : !llvm.double to !llvm.float
+  %a = llvm.fptrunc %fp2 : f64 to f32
 // CHECK:    fpext float {{.*}} to double
-  %b = llvm.fpext %fp1 : !llvm.float to !llvm.double
+  %b = llvm.fpext %fp1 : f32 to f64
 // CHECK:    fptosi double {{.*}} to i16
-  %c = llvm.fptosi %b : !llvm.double to i16
+  %c = llvm.fptosi %b : f64 to i16
   llvm.return %c : i16
 }
 
@@ -1107,14 +1107,14 @@ llvm.func @elements_constant_3d_array() -> !llvm.array<2 x array<2 x array<2 x i
 
 // CHECK-LABEL: @atomicrmw
 llvm.func @atomicrmw(
-    %f32_ptr : !llvm.ptr<float>, %f32 : !llvm.float,
+    %f32_ptr : !llvm.ptr<f32>, %f32 : f32,
     %i32_ptr : !llvm.ptr<i32>, %i32 : i32) {
   // CHECK: atomicrmw fadd float* %{{.*}}, float %{{.*}} unordered
-  %0 = llvm.atomicrmw fadd %f32_ptr, %f32 unordered : !llvm.float
+  %0 = llvm.atomicrmw fadd %f32_ptr, %f32 unordered : f32
   // CHECK: atomicrmw fsub float* %{{.*}}, float %{{.*}} unordered
-  %1 = llvm.atomicrmw fsub %f32_ptr, %f32 unordered : !llvm.float
+  %1 = llvm.atomicrmw fsub %f32_ptr, %f32 unordered : f32
   // CHECK: atomicrmw xchg float* %{{.*}}, float %{{.*}} monotonic
-  %2 = llvm.atomicrmw xchg %f32_ptr, %f32 monotonic : !llvm.float
+  %2 = llvm.atomicrmw xchg %f32_ptr, %f32 monotonic : f32
   // CHECK: atomicrmw add i32* %{{.*}}, i32 %{{.*}} acquire
   %3 = llvm.atomicrmw add %i32_ptr, %i32 acquire : i32
   // CHECK: atomicrmw sub i32* %{{.*}}, i32 %{{.*}} release
@@ -1235,9 +1235,9 @@ llvm.func @passthrough() attributes {passthrough = ["noinline", ["alignstack", "
 // -----
 
 // CHECK-LABEL: @constant_bf16
-llvm.func @constant_bf16() -> !llvm.bfloat {
-  %0 = llvm.mlir.constant(1.000000e+01 : bf16) : !llvm.bfloat
-  llvm.return %0 : !llvm.bfloat
+llvm.func @constant_bf16() -> bf16 {
+  %0 = llvm.mlir.constant(1.000000e+01 : bf16) : bf16
+  llvm.return %0 : bf16
 }
 
 // CHECK: ret bfloat 0xR4120
@@ -1360,26 +1360,26 @@ llvm.func @useInlineAsm(%arg0: i32) {
 
 // -----
 
-llvm.func @fastmathFlagsFunc(!llvm.float) -> !llvm.float
+llvm.func @fastmathFlagsFunc(f32) -> f32
 
 // CHECK-LABEL: @fastmathFlags
-llvm.func @fastmathFlags(%arg0: !llvm.float) {
+llvm.func @fastmathFlags(%arg0: f32) {
 // CHECK: {{.*}} = fadd nnan ninf float {{.*}}, {{.*}}
 // CHECK: {{.*}} = fsub nnan ninf float {{.*}}, {{.*}}
 // CHECK: {{.*}} = fmul nnan ninf float {{.*}}, {{.*}}
 // CHECK: {{.*}} = fdiv nnan ninf float {{.*}}, {{.*}}
 // CHECK: {{.*}} = frem nnan ninf float {{.*}}, {{.*}}
-  %0 = llvm.fadd %arg0, %arg0 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : !llvm.float
-  %1 = llvm.fsub %arg0, %arg0 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : !llvm.float
-  %2 = llvm.fmul %arg0, %arg0 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : !llvm.float
-  %3 = llvm.fdiv %arg0, %arg0 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : !llvm.float
-  %4 = llvm.frem %arg0, %arg0 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : !llvm.float
+  %0 = llvm.fadd %arg0, %arg0 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : f32
+  %1 = llvm.fsub %arg0, %arg0 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : f32
+  %2 = llvm.fmul %arg0, %arg0 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : f32
+  %3 = llvm.fdiv %arg0, %arg0 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : f32
+  %4 = llvm.frem %arg0, %arg0 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : f32
 
 // CHECK: {{.*}} = fcmp nnan ninf oeq {{.*}}, {{.*}}
-  %5 = llvm.fcmp "oeq" %arg0, %arg0 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : !llvm.float
+  %5 = llvm.fcmp "oeq" %arg0, %arg0 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : f32
 
 // CHECK: {{.*}} = fneg nnan ninf float {{.*}}
-  %6 = llvm.fneg %arg0 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : !llvm.float
+  %6 = llvm.fneg %arg0 {fastmathFlags = #llvm.fastmath<nnan, ninf>} : f32
 
 // CHECK: {{.*}} = call float @fastmathFlagsFunc({{.*}})
 // CHECK: {{.*}} = call nnan float @fastmathFlagsFunc({{.*}})
@@ -1390,15 +1390,15 @@ llvm.func @fastmathFlags(%arg0: !llvm.float) {
 // CHECK: {{.*}} = call afn float @fastmathFlagsFunc({{.*}})
 // CHECK: {{.*}} = call reassoc float @fastmathFlagsFunc({{.*}})
 // CHECK: {{.*}} = call fast float @fastmathFlagsFunc({{.*}})
-  %8 = llvm.call @fastmathFlagsFunc(%arg0) {fastmathFlags = #llvm.fastmath<>} : (!llvm.float) -> (!llvm.float)
-  %9 = llvm.call @fastmathFlagsFunc(%arg0) {fastmathFlags = #llvm.fastmath<nnan>} : (!llvm.float) -> (!llvm.float)
-  %10 = llvm.call @fastmathFlagsFunc(%arg0) {fastmathFlags = #llvm.fastmath<ninf>} : (!llvm.float) -> (!llvm.float)
-  %11 = llvm.call @fastmathFlagsFunc(%arg0) {fastmathFlags = #llvm.fastmath<nsz>} : (!llvm.float) -> (!llvm.float)
-  %12 = llvm.call @fastmathFlagsFunc(%arg0) {fastmathFlags = #llvm.fastmath<arcp>} : (!llvm.float) -> (!llvm.float)
-  %13 = llvm.call @fastmathFlagsFunc(%arg0) {fastmathFlags = #llvm.fastmath<contract>} : (!llvm.float) -> (!llvm.float)
-  %14 = llvm.call @fastmathFlagsFunc(%arg0) {fastmathFlags = #llvm.fastmath<afn>} : (!llvm.float) -> (!llvm.float)
-  %15 = llvm.call @fastmathFlagsFunc(%arg0) {fastmathFlags = #llvm.fastmath<reassoc>} : (!llvm.float) -> (!llvm.float)
-  %16 = llvm.call @fastmathFlagsFunc(%arg0) {fastmathFlags = #llvm.fastmath<fast>} : (!llvm.float) -> (!llvm.float)
+  %8 = llvm.call @fastmathFlagsFunc(%arg0) {fastmathFlags = #llvm.fastmath<>} : (f32) -> (f32)
+  %9 = llvm.call @fastmathFlagsFunc(%arg0) {fastmathFlags = #llvm.fastmath<nnan>} : (f32) -> (f32)
+  %10 = llvm.call @fastmathFlagsFunc(%arg0) {fastmathFlags = #llvm.fastmath<ninf>} : (f32) -> (f32)
+  %11 = llvm.call @fastmathFlagsFunc(%arg0) {fastmathFlags = #llvm.fastmath<nsz>} : (f32) -> (f32)
+  %12 = llvm.call @fastmathFlagsFunc(%arg0) {fastmathFlags = #llvm.fastmath<arcp>} : (f32) -> (f32)
+  %13 = llvm.call @fastmathFlagsFunc(%arg0) {fastmathFlags = #llvm.fastmath<contract>} : (f32) -> (f32)
+  %14 = llvm.call @fastmathFlagsFunc(%arg0) {fastmathFlags = #llvm.fastmath<afn>} : (f32) -> (f32)
+  %15 = llvm.call @fastmathFlagsFunc(%arg0) {fastmathFlags = #llvm.fastmath<reassoc>} : (f32) -> (f32)
+  %16 = llvm.call @fastmathFlagsFunc(%arg0) {fastmathFlags = #llvm.fastmath<fast>} : (f32) -> (f32)
   llvm.return
 }
 

diff  --git a/mlir/test/Target/nvvmir.mlir b/mlir/test/Target/nvvmir.mlir
index 24642b8d1154..63dd200be9d2 100644
--- a/mlir/test/Target/nvvmir.mlir
+++ b/mlir/test/Target/nvvmir.mlir
@@ -40,21 +40,21 @@ llvm.func @llvm.nvvm.barrier0() {
 
 llvm.func @nvvm_shfl(
     %0 : i32, %1 : i32, %2 : i32,
-    %3 : i32, %4 : !llvm.float) -> i32 {
+    %3 : i32, %4 : f32) -> i32 {
   // CHECK: call i32 @llvm.nvvm.shfl.sync.bfly.i32(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %6 = nvvm.shfl.sync.bfly %0, %3, %1, %2 : i32
   // CHECK: call float @llvm.nvvm.shfl.sync.bfly.f32(i32 %{{.*}}, float %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
-  %7 = nvvm.shfl.sync.bfly %0, %4, %1, %2 : !llvm.float
+  %7 = nvvm.shfl.sync.bfly %0, %4, %1, %2 : f32
   llvm.return %6 : i32
 }
 
 llvm.func @nvvm_shfl_pred(
     %0 : i32, %1 : i32, %2 : i32,
-    %3 : i32, %4 : !llvm.float) -> !llvm.struct<(i32, i1)> {
+    %3 : i32, %4 : f32) -> !llvm.struct<(i32, i1)> {
   // CHECK: call { i32, i1 } @llvm.nvvm.shfl.sync.bfly.i32p(i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %6 = nvvm.shfl.sync.bfly %0, %3, %1, %2 {return_value_and_is_valid} : !llvm.struct<(i32, i1)>
   // CHECK: call { float, i1 } @llvm.nvvm.shfl.sync.bfly.f32p(i32 %{{.*}}, float %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
-  %7 = nvvm.shfl.sync.bfly %0, %4, %1, %2 {return_value_and_is_valid} : !llvm.struct<(float, i1)>
+  %7 = nvvm.shfl.sync.bfly %0, %4, %1, %2 {return_value_and_is_valid} : !llvm.struct<(f32, i1)>
   llvm.return %6 : !llvm.struct<(i32, i1)>
 }
 
@@ -64,13 +64,13 @@ llvm.func @nvvm_vote(%0 : i32, %1 : i1) -> i32 {
   llvm.return %3 : i32
 }
 
-llvm.func @nvvm_mma(%a0 : !llvm.vec<2 x half>, %a1 : !llvm.vec<2 x half>,
-                    %b0 : !llvm.vec<2 x half>, %b1 : !llvm.vec<2 x half>,
-                    %c0 : !llvm.float, %c1 : !llvm.float, %c2 : !llvm.float, %c3 : !llvm.float,
-                    %c4 : !llvm.float, %c5 : !llvm.float, %c6 : !llvm.float, %c7 : !llvm.float) {
+llvm.func @nvvm_mma(%a0 : !llvm.vec<2 x f16>, %a1 : !llvm.vec<2 x f16>,
+                    %b0 : !llvm.vec<2 x f16>, %b1 : !llvm.vec<2 x f16>,
+                    %c0 : f32, %c1 : f32, %c2 : f32, %c3 : f32,
+                    %c4 : f32, %c5 : f32, %c6 : f32, %c7 : f32) {
   // CHECK: call { float, float, float, float, float, float, float, float } @llvm.nvvm.mma.m8n8k4.row.col.f32.f32
-  %0 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="row", blayout="col"} : (!llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.vec<2 x half>, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float, !llvm.float) -> !llvm.struct<(float, float, float, float, float, float, float, float)>
-  llvm.return %0 : !llvm.struct<(float, float, float, float, float, float, float, float)>
+  %0 = nvvm.mma.sync %a0, %a1, %b0, %b1, %c0, %c1, %c2, %c3, %c4, %c5, %c6, %c7 {alayout="row", blayout="col"} : (!llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, !llvm.vec<2 x f16>, f32, f32, f32, f32, f32, f32, f32, f32) -> !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)>
+  llvm.return %0 : !llvm.struct<(f32, f32, f32, f32, f32, f32, f32, f32)>
 }
 
 // This function has the "kernel" attribute attached and should appear in the

diff  --git a/mlir/test/Target/openmp-llvm.mlir b/mlir/test/Target/openmp-llvm.mlir
index 0e7ace7ec91f..d8bda22fc93c 100644
--- a/mlir/test/Target/openmp-llvm.mlir
+++ b/mlir/test/Target/openmp-llvm.mlir
@@ -302,7 +302,7 @@ llvm.func @test_omp_master() -> () {
 // CHECK: @[[$wsloop_loc_struct:.*]] = private unnamed_addr constant %struct.ident_t {{.*}} @[[$wsloop_loc]], {{.*}}
 
 // CHECK-LABEL: @wsloop_simple
-llvm.func @wsloop_simple(%arg0: !llvm.ptr<float>) {
+llvm.func @wsloop_simple(%arg0: !llvm.ptr<f32>) {
   %0 = llvm.mlir.constant(42 : index) : i64
   %1 = llvm.mlir.constant(10 : index) : i64
   %2 = llvm.mlir.constant(1 : index) : i64
@@ -313,9 +313,9 @@ llvm.func @wsloop_simple(%arg0: !llvm.ptr<float>) {
       // tested there. Just check that the right functions are called.
       // CHECK: call i32 @__kmpc_global_thread_num
       // CHECK: call void @__kmpc_for_static_init_{{.*}}(%struct.ident_t* @[[$wsloop_loc_struct]],
-      %3 = llvm.mlir.constant(2.000000e+00 : f32) : !llvm.float
-      %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-      llvm.store %3, %4 : !llvm.ptr<float>
+      %3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
+      %4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+      llvm.store %3, %4 : !llvm.ptr<f32>
       omp.yield
       // CHECK: call void @__kmpc_for_static_fini(%struct.ident_t* @[[$wsloop_loc_struct]],
     }) {operand_segment_sizes = dense<[1, 1, 1, 0, 0, 0, 0, 0, 0]> : vector<9xi32>} : (i64, i64, i64) -> ()

diff  --git a/mlir/test/Target/rocdl.mlir b/mlir/test/Target/rocdl.mlir
index 738a3fe7825e..b93c5f53e6ab 100644
--- a/mlir/test/Target/rocdl.mlir
+++ b/mlir/test/Target/rocdl.mlir
@@ -42,62 +42,62 @@ llvm.func @rocdl.barrier() {
   llvm.return
 }
 
-llvm.func @rocdl.xdlops(%arg0 : !llvm.float, %arg1 : !llvm.float,
-                   %arg2 : !llvm.vec<32 x float>, %arg3 : i32,
-                   %arg4 : !llvm.vec<16 x float>, %arg5 : !llvm.vec<4 x float>,
-                   %arg6 : !llvm.vec<4 x half>, %arg7 : !llvm.vec<32 x i32>,
+llvm.func @rocdl.xdlops(%arg0 : f32, %arg1 : f32,
+                   %arg2 : !llvm.vec<32 x f32>, %arg3 : i32,
+                   %arg4 : !llvm.vec<16 x f32>, %arg5 : !llvm.vec<4 x f32>,
+                   %arg6 : !llvm.vec<4 x f16>, %arg7 : !llvm.vec<32 x i32>,
                    %arg8 : !llvm.vec<16 x i32>, %arg9 : !llvm.vec<4 x i32>,
-                   %arg10 : !llvm.vec<2 x i16>) -> !llvm.vec<32 x float> {
+                   %arg10 : !llvm.vec<2 x i16>) -> !llvm.vec<32 x f32> {
   // CHECK-LABEL: rocdl.xdlops
   // CHECK: call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(float %{{.*}}, float %{{.*}}, <32 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r0 = rocdl.mfma.f32.32x32x1f32 %arg0, %arg1, %arg2, %arg3, %arg3, %arg3 :
-                            (!llvm.float, !llvm.float, !llvm.vec<32 x float>,
-                            i32, i32, i32) -> !llvm.vec<32 x float>
+                            (f32, f32, !llvm.vec<32 x f32>,
+                            i32, i32, i32) -> !llvm.vec<32 x f32>
 
   // CHECK: call <16 x float> @llvm.amdgcn.mfma.f32.16x16x1f32(float %{{.*}}, float %{{.*}}, <16 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r1 = rocdl.mfma.f32.16x16x1f32 %arg0, %arg1, %arg4, %arg3, %arg3, %arg3 :
-                            (!llvm.float, !llvm.float, !llvm.vec<16 x float>,
-                            i32, i32, i32) -> !llvm.vec<16 x float>
+                            (f32, f32, !llvm.vec<16 x f32>,
+                            i32, i32, i32) -> !llvm.vec<16 x f32>
 
   // CHECK: call <4 x float> @llvm.amdgcn.mfma.f32.16x16x4f32(float %{{.*}}, float %{{.*}}, <4 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r2 = rocdl.mfma.f32.16x16x4f32 %arg0, %arg1, %arg5, %arg3, %arg3, %arg3 :
-                            (!llvm.float, !llvm.float, !llvm.vec<4 x float>,
-                            i32, i32, i32) -> !llvm.vec<4 x float>
+                            (f32, f32, !llvm.vec<4 x f32>,
+                            i32, i32, i32) -> !llvm.vec<4 x f32>
 
   // CHECK: call <4 x float> @llvm.amdgcn.mfma.f32.4x4x1f32(float %{{.*}}, float %{{.*}}, <4 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r3 = rocdl.mfma.f32.4x4x1f32 %arg0, %arg1, %arg5, %arg3, %arg3, %arg3 :
-                            (!llvm.float, !llvm.float, !llvm.vec<4 x float>,
-                            i32, i32, i32) -> !llvm.vec<4 x float>
+                            (f32, f32, !llvm.vec<4 x f32>,
+                            i32, i32, i32) -> !llvm.vec<4 x f32>
 
   // CHECK: call <16 x float> @llvm.amdgcn.mfma.f32.32x32x2f32(float %{{.*}}, float %{{.*}}, <16 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r4= rocdl.mfma.f32.32x32x2f32 %arg0, %arg1, %arg4, %arg3, %arg3, %arg3 :
-                            (!llvm.float, !llvm.float, !llvm.vec<16 x float>,
-                            i32, i32, i32) -> !llvm.vec<16 x float>
+                            (f32, f32, !llvm.vec<16 x f32>,
+                            i32, i32, i32) -> !llvm.vec<16 x f32>
 
   // CHECK: call <32 x float> @llvm.amdgcn.mfma.f32.32x32x4f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, <32 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r5 = rocdl.mfma.f32.32x32x4f16 %arg6, %arg6, %arg2, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<32 x float>,
-                            i32, i32, i32) -> !llvm.vec<32 x float>
+                            (!llvm.vec<4 x f16>, !llvm.vec<4 x f16>, !llvm.vec<32 x f32>,
+                            i32, i32, i32) -> !llvm.vec<32 x f32>
 
   // CHECK: call <16 x float> @llvm.amdgcn.mfma.f32.16x16x4f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, <16 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r6 = rocdl.mfma.f32.16x16x4f16 %arg6, %arg6, %arg4, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>,
-                            i32, i32, i32) -> !llvm.vec<16 x float>
+                            (!llvm.vec<4 x f16>, !llvm.vec<4 x f16>, !llvm.vec<16 x f32>,
+                            i32, i32, i32) -> !llvm.vec<16 x f32>
 
   // CHECK: call <4 x float> @llvm.amdgcn.mfma.f32.4x4x4f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, <4 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r7 = rocdl.mfma.f32.4x4x4f16 %arg6, %arg6, %arg5, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>,
-                            i32, i32, i32) -> !llvm.vec<4 x float>
+                            (!llvm.vec<4 x f16>, !llvm.vec<4 x f16>, !llvm.vec<4 x f32>,
+                            i32, i32, i32) -> !llvm.vec<4 x f32>
 
   // CHECK: call <16 x float> @llvm.amdgcn.mfma.f32.32x32x8f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, <16 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r8 = rocdl.mfma.f32.32x32x8f16 %arg6, %arg6, %arg4, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<16 x float>,
-                            i32, i32, i32) -> !llvm.vec<16 x float>
+                            (!llvm.vec<4 x f16>, !llvm.vec<4 x f16>, !llvm.vec<16 x f32>,
+                            i32, i32, i32) -> !llvm.vec<16 x f32>
 
   // CHECK: call <4 x float> @llvm.amdgcn.mfma.f32.16x16x16f16(<4 x half> %{{.*}}, <4 x half> %{{.*}}, <4 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r9 = rocdl.mfma.f32.16x16x16f16 %arg6, %arg6, %arg5, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<4 x half>, !llvm.vec<4 x half>, !llvm.vec<4 x float>,
-                            i32, i32, i32) -> !llvm.vec<4 x float>
+                            (!llvm.vec<4 x f16>, !llvm.vec<4 x f16>, !llvm.vec<4 x f32>,
+                            i32, i32, i32) -> !llvm.vec<4 x f32>
 
   // CHECK: call <32 x i32> @llvm.amdgcn.mfma.i32.32x32x4i8(i32 %{{.*}}, i32 %{{.*}}, <32 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r10 = rocdl.mfma.i32.32x32x4i8 %arg3, %arg3, %arg7, %arg3, %arg3, %arg3 :
@@ -126,50 +126,50 @@ llvm.func @rocdl.xdlops(%arg0 : !llvm.float, %arg1 : !llvm.float,
 
   // CHECK: call <32 x float> @llvm.amdgcn.mfma.f32.32x32x2bf16(<2 x i16> %{{.*}}, <2 x i16> %{{.*}}, <32 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r15 = rocdl.mfma.f32.32x32x2bf16 %arg10, %arg10, %arg2, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<32 x float>,
-                            i32, i32, i32) -> !llvm.vec<32 x float>
+                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<32 x f32>,
+                            i32, i32, i32) -> !llvm.vec<32 x f32>
 
   // CHECK: call <16 x float> @llvm.amdgcn.mfma.f32.16x16x2bf16(<2 x i16> %{{.*}}, <2 x i16> %{{.*}}, <16 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r16 = rocdl.mfma.f32.16x16x2bf16 %arg10, %arg10, %arg4, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>,
-                            i32, i32, i32) -> !llvm.vec<16 x float>
+                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x f32>,
+                            i32, i32, i32) -> !llvm.vec<16 x f32>
 
   // CHECK: call <4 x float> @llvm.amdgcn.mfma.f32.4x4x2bf16(<2 x i16> %{{.*}}, <2 x i16> %{{.*}}, <4 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r17 = rocdl.mfma.f32.4x4x2bf16 %arg10, %arg10, %arg5, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>,
-                            i32, i32, i32) -> !llvm.vec<4 x float>
+                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x f32>,
+                            i32, i32, i32) -> !llvm.vec<4 x f32>
 
   // CHECK: call <16 x float> @llvm.amdgcn.mfma.f32.32x32x4bf16(<2 x i16> %{{.*}}, <2 x i16> %{{.*}}, <16 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r18 = rocdl.mfma.f32.32x32x4bf16 %arg10, %arg10, %arg4, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x float>,
-                            i32, i32, i32) -> !llvm.vec<16 x float>
+                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<16 x f32>,
+                            i32, i32, i32) -> !llvm.vec<16 x f32>
 
   // CHECK: call <4 x float> @llvm.amdgcn.mfma.f32.16x16x8bf16(<2 x i16> %{{.*}}, <2 x i16> %{{.*}}, <4 x float> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i32 %{{.*}})
   %r19 = rocdl.mfma.f32.16x16x8bf16 %arg10, %arg10, %arg5, %arg3, %arg3, %arg3 :
-                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x float>,
-                            i32, i32, i32) -> !llvm.vec<4 x float>
+                            (!llvm.vec<2 x i16>, !llvm.vec<2 x i16>, !llvm.vec<4 x f32>,
+                            i32, i32, i32) -> !llvm.vec<4 x f32>
 
-  llvm.return %r0 : !llvm.vec<32 x float>
+  llvm.return %r0 : !llvm.vec<32 x f32>
 }
 
 llvm.func @rocdl.mubuf(%rsrc : !llvm.vec<4 x i32>, %vindex : i32,
                        %offset : i32, %glc : i1,
-                       %slc : i1, %vdata1 : !llvm.vec<1 x float>,
-                       %vdata2 : !llvm.vec<2 x float>, %vdata4 : !llvm.vec<4 x float>) {
+                       %slc : i1, %vdata1 : !llvm.vec<1 x f32>,
+                       %vdata2 : !llvm.vec<2 x f32>, %vdata4 : !llvm.vec<4 x f32>) {
   // CHECK-LABEL: rocdl.mubuf
   // CHECK: call <1 x float> @llvm.amdgcn.buffer.load.v1f32(<4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i1 %{{.*}}, i1 %{{.*}})
-  %r1 = rocdl.buffer.load %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<1 x float>
+  %r1 = rocdl.buffer.load %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<1 x f32>
   // CHECK: call <2 x float> @llvm.amdgcn.buffer.load.v2f32(<4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i1 %{{.*}}, i1 %{{.*}})
-  %r2 = rocdl.buffer.load %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<2 x float>
+  %r2 = rocdl.buffer.load %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<2 x f32>
   // CHECK: call <4 x float> @llvm.amdgcn.buffer.load.v4f32(<4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i1 %{{.*}}, i1 %{{.*}})
-  %r4 = rocdl.buffer.load %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<4 x float>
+  %r4 = rocdl.buffer.load %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<4 x f32>
 
   // CHECK: call void @llvm.amdgcn.buffer.store.v1f32(<1 x float> %{{.*}}, <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i1 %{{.*}}, i1 %{{.*}})
-  rocdl.buffer.store %vdata1, %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<1 x float>
+  rocdl.buffer.store %vdata1, %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<1 x f32>
   // CHECK: call void @llvm.amdgcn.buffer.store.v2f32(<2 x float> %{{.*}}, <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i1 %{{.*}}, i1 %{{.*}})
-  rocdl.buffer.store %vdata2, %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<2 x float>
+  rocdl.buffer.store %vdata2, %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<2 x f32>
   // CHECK: call void @llvm.amdgcn.buffer.store.v4f32(<4 x float> %{{.*}}, <4 x i32> %{{.*}}, i32 %{{.*}}, i32 %{{.*}}, i1 %{{.*}}, i1 %{{.*}})
-  rocdl.buffer.store %vdata4, %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<4 x float>
+  rocdl.buffer.store %vdata4, %rsrc, %vindex, %offset, %glc, %slc : !llvm.vec<4 x f32>
 
   llvm.return
 }

diff  --git a/mlir/test/mlir-cpu-runner/simple.mlir b/mlir/test/mlir-cpu-runner/simple.mlir
index 4007ac7c4f5f..72d241439d06 100644
--- a/mlir/test/mlir-cpu-runner/simple.mlir
+++ b/mlir/test/mlir-cpu-runner/simple.mlir
@@ -14,43 +14,43 @@
 // RUN: rm %T/test.o
 
 // Declarations of C library functions.
-llvm.func @fabsf(!llvm.float) -> !llvm.float
+llvm.func @fabsf(f32) -> f32
 llvm.func @malloc(i64) -> !llvm.ptr<i8>
 llvm.func @free(!llvm.ptr<i8>)
 
 // Check that a simple function with a nested call works.
-llvm.func @main() -> !llvm.float {
-  %0 = llvm.mlir.constant(-4.200000e+02 : f32) : !llvm.float
-  %1 = llvm.call @fabsf(%0) : (!llvm.float) -> !llvm.float
-  llvm.return %1 : !llvm.float
+llvm.func @main() -> f32 {
+  %0 = llvm.mlir.constant(-4.200000e+02 : f32) : f32
+  %1 = llvm.call @fabsf(%0) : (f32) -> f32
+  llvm.return %1 : f32
 }
 // CHECK: 4.200000e+02
 
 // Helper typed functions wrapping calls to "malloc" and "free".
-llvm.func @allocation() -> !llvm.ptr<float> {
+llvm.func @allocation() -> !llvm.ptr<f32> {
   %0 = llvm.mlir.constant(4 : index) : i64
   %1 = llvm.call @malloc(%0) : (i64) -> !llvm.ptr<i8>
-  %2 = llvm.bitcast %1 : !llvm.ptr<i8> to !llvm.ptr<float>
-  llvm.return %2 : !llvm.ptr<float>
+  %2 = llvm.bitcast %1 : !llvm.ptr<i8> to !llvm.ptr<f32>
+  llvm.return %2 : !llvm.ptr<f32>
 }
-llvm.func @deallocation(%arg0: !llvm.ptr<float>) {
-  %0 = llvm.bitcast %arg0 : !llvm.ptr<float> to !llvm.ptr<i8>
+llvm.func @deallocation(%arg0: !llvm.ptr<f32>) {
+  %0 = llvm.bitcast %arg0 : !llvm.ptr<f32> to !llvm.ptr<i8>
   llvm.call @free(%0) : (!llvm.ptr<i8>) -> ()
   llvm.return
 }
 
 // Check that allocation and deallocation works, and that a custom entry point
 // works.
-llvm.func @foo() -> !llvm.float {
-  %0 = llvm.call @allocation() : () -> !llvm.ptr<float>
+llvm.func @foo() -> f32 {
+  %0 = llvm.call @allocation() : () -> !llvm.ptr<f32>
   %1 = llvm.mlir.constant(0 : index) : i64
-  %2 = llvm.mlir.constant(1.234000e+03 : f32) : !llvm.float
-  %3 = llvm.getelementptr %0[%1] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-  llvm.store %2, %3 : !llvm.ptr<float>
-  %4 = llvm.getelementptr %0[%1] : (!llvm.ptr<float>, i64) -> !llvm.ptr<float>
-  %5 = llvm.load %4 : !llvm.ptr<float>
-  llvm.call @deallocation(%0) : (!llvm.ptr<float>) -> ()
-  llvm.return %5 : !llvm.float
+  %2 = llvm.mlir.constant(1.234000e+03 : f32) : f32
+  %3 = llvm.getelementptr %0[%1] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+  llvm.store %2, %3 : !llvm.ptr<f32>
+  %4 = llvm.getelementptr %0[%1] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
+  %5 = llvm.load %4 : !llvm.ptr<f32>
+  llvm.call @deallocation(%0) : (!llvm.ptr<f32>) -> ()
+  llvm.return %5 : f32
 }
 // NOMAIN: 1.234000e+03
 


        


More information about the Mlir-commits mailing list