[Mlir-commits] [mlir] 5ab6ef7 - [mlir][spirv] Change dialect name from 'spv' to 'spirv'

Jakub Kuderski llvmlistbot at llvm.org
Mon Sep 26 08:00:17 PDT 2022


Author: Jakub Kuderski
Date: 2022-09-26T10:58:30-04:00
New Revision: 5ab6ef758f0f549fb39bf9b34a6a743e989b212a

URL: https://github.com/llvm/llvm-project/commit/5ab6ef758f0f549fb39bf9b34a6a743e989b212a
DIFF: https://github.com/llvm/llvm-project/commit/5ab6ef758f0f549fb39bf9b34a6a743e989b212a.diff

LOG: [mlir][spirv] Change dialect name from 'spv' to 'spirv'

Tested with `check-mlir` and `check-mlir-integration`.

Issue: https://github.com/llvm/llvm-project/issues/56863

Reviewed By: antiagainst

Differential Revision: https://reviews.llvm.org/D134620

Added: 
    

Modified: 
    mlir/docs/DialectConversion.md
    mlir/docs/Dialects/SPIR-V.md
    mlir/docs/PassManagement.md
    mlir/docs/SPIRVToLLVMDialectConversion.md
    mlir/include/mlir/Conversion/GPUToSPIRV/GPUToSPIRV.h
    mlir/include/mlir/Conversion/GPUToSPIRV/GPUToSPIRVPass.h
    mlir/include/mlir/Conversion/Passes.td
    mlir/include/mlir/Dialect/SPIRV/IR/CMakeLists.txt
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVArithmeticOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVAtomicOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVAttributes.h
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBarrierOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBitOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCLOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCastOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVControlFlowOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCooperativeMatrixOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGLOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVImageOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVJointMatrixOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMatrixOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMemoryOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMiscOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
    mlir/include/mlir/Dialect/SPIRV/IR/SPIRVTypes.h
    mlir/include/mlir/Dialect/SPIRV/IR/TargetAndABI.h
    mlir/include/mlir/Dialect/SPIRV/Linking/ModuleCombiner.h
    mlir/include/mlir/Dialect/SPIRV/Transforms/Passes.h
    mlir/include/mlir/Dialect/SPIRV/Transforms/Passes.td
    mlir/include/mlir/IR/OpAsmInterface.td
    mlir/lib/Conversion/ArithmeticToSPIRV/ArithmeticToSPIRV.cpp
    mlir/lib/Conversion/ControlFlowToSPIRV/ControlFlowToSPIRV.cpp
    mlir/lib/Conversion/FuncToSPIRV/FuncToSPIRV.cpp
    mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
    mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp
    mlir/lib/Conversion/GPUToVulkan/ConvertGPULaunchFuncToVulkanLaunchFunc.cpp
    mlir/lib/Conversion/LinalgToSPIRV/LinalgToSPIRV.cpp
    mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp
    mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp
    mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
    mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
    mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVMPass.cpp
    mlir/lib/Conversion/TensorToSPIRV/TensorToSPIRV.cpp
    mlir/lib/Dialect/SPIRV/IR/SPIRVAttributes.cpp
    mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
    mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.td
    mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
    mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
    mlir/lib/Dialect/SPIRV/IR/TargetAndABI.cpp
    mlir/lib/Dialect/SPIRV/Linking/ModuleCombiner/ModuleCombiner.cpp
    mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
    mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
    mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp
    mlir/lib/Dialect/SPIRV/Transforms/UpdateVCEPass.cpp
    mlir/lib/Target/SPIRV/Deserialization/DeserializeOps.cpp
    mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp
    mlir/lib/Target/SPIRV/Deserialization/Deserializer.h
    mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp
    mlir/lib/Target/SPIRV/Serialization/Serializer.cpp
    mlir/lib/Target/SPIRV/Serialization/Serializer.h
    mlir/lib/Target/SPIRV/TranslateRegistration.cpp
    mlir/test/Conversion/ArithmeticToSPIRV/arithmetic-to-spirv.mlir
    mlir/test/Conversion/ArithmeticToSPIRV/fast-math.mlir
    mlir/test/Conversion/ControlFlowToSPIRV/cf-ops-to-spirv.mlir
    mlir/test/Conversion/FuncToSPIRV/func-ops-to-spirv.mlir
    mlir/test/Conversion/FuncToSPIRV/types-to-spirv.mlir
    mlir/test/Conversion/GPUToSPIRV/builtins.mlir
    mlir/test/Conversion/GPUToSPIRV/entry-point.mlir
    mlir/test/Conversion/GPUToSPIRV/gpu-to-spirv.mlir
    mlir/test/Conversion/GPUToSPIRV/load-store.mlir
    mlir/test/Conversion/GPUToSPIRV/module-opencl.mlir
    mlir/test/Conversion/GPUToSPIRV/shuffle.mlir
    mlir/test/Conversion/GPUToVulkan/lower-gpu-launch-vulkan-launch.mlir
    mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir
    mlir/test/Conversion/MathToSPIRV/math-to-core-spirv.mlir
    mlir/test/Conversion/MathToSPIRV/math-to-gl-spirv.mlir
    mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir
    mlir/test/Conversion/MemRefToSPIRV/alloc.mlir
    mlir/test/Conversion/MemRefToSPIRV/alloca.mlir
    mlir/test/Conversion/MemRefToSPIRV/map-storage-class.mlir
    mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir
    mlir/test/Conversion/SCFToSPIRV/for.mlir
    mlir/test/Conversion/SCFToSPIRV/if.mlir
    mlir/test/Conversion/SCFToSPIRV/while.mlir
    mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/control-flow-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/gl-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/logical-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir
    mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/module-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/shift-ops-to-llvm.mlir
    mlir/test/Conversion/SPIRVToLLVM/spirv-types-to-llvm-invalid.mlir
    mlir/test/Conversion/SPIRVToLLVM/spirv-types-to-llvm.mlir
    mlir/test/Conversion/TensorToSPIRV/tensor-ops-to-spirv.mlir
    mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir
    mlir/test/Dialect/SPIRV/IR/arithmetic-ops.mlir
    mlir/test/Dialect/SPIRV/IR/asm-op-interface.mlir
    mlir/test/Dialect/SPIRV/IR/atomic-ops.mlir
    mlir/test/Dialect/SPIRV/IR/availability.mlir
    mlir/test/Dialect/SPIRV/IR/barrier-ops.mlir
    mlir/test/Dialect/SPIRV/IR/bit-ops.mlir
    mlir/test/Dialect/SPIRV/IR/cast-ops.mlir
    mlir/test/Dialect/SPIRV/IR/composite-ops.mlir
    mlir/test/Dialect/SPIRV/IR/control-flow-ops.mlir
    mlir/test/Dialect/SPIRV/IR/cooperative-matrix-ops.mlir
    mlir/test/Dialect/SPIRV/IR/gl-ops.mlir
    mlir/test/Dialect/SPIRV/IR/group-ops.mlir
    mlir/test/Dialect/SPIRV/IR/image-ops.mlir
    mlir/test/Dialect/SPIRV/IR/joint-matrix-ops.mlir
    mlir/test/Dialect/SPIRV/IR/logical-ops.mlir
    mlir/test/Dialect/SPIRV/IR/matrix-ops.mlir
    mlir/test/Dialect/SPIRV/IR/memory-ops.mlir
    mlir/test/Dialect/SPIRV/IR/misc-ops.mlir
    mlir/test/Dialect/SPIRV/IR/non-uniform-ops.mlir
    mlir/test/Dialect/SPIRV/IR/ocl-ops.mlir
    mlir/test/Dialect/SPIRV/IR/structure-ops.mlir
    mlir/test/Dialect/SPIRV/IR/target-and-abi.mlir
    mlir/test/Dialect/SPIRV/IR/target-env.mlir
    mlir/test/Dialect/SPIRV/IR/types.mlir
    mlir/test/Dialect/SPIRV/Linking/ModuleCombiner/basic.mlir
    mlir/test/Dialect/SPIRV/Linking/ModuleCombiner/conflict-resolution.mlir
    mlir/test/Dialect/SPIRV/Linking/ModuleCombiner/deduplication.mlir
    mlir/test/Dialect/SPIRV/Linking/ModuleCombiner/symbol-rename-listener.mlir
    mlir/test/Dialect/SPIRV/Transforms/abi-interface-opencl.mlir
    mlir/test/Dialect/SPIRV/Transforms/abi-interface.mlir
    mlir/test/Dialect/SPIRV/Transforms/abi-load-store.mlir
    mlir/test/Dialect/SPIRV/Transforms/canonicalize.mlir
    mlir/test/Dialect/SPIRV/Transforms/gl-canonicalize.mlir
    mlir/test/Dialect/SPIRV/Transforms/inlining.mlir
    mlir/test/Dialect/SPIRV/Transforms/layout-decoration.mlir
    mlir/test/Dialect/SPIRV/Transforms/rewrite-inserts.mlir
    mlir/test/Dialect/SPIRV/Transforms/unify-aliased-resource.mlir
    mlir/test/Dialect/SPIRV/Transforms/vce-deduction.mlir
    mlir/test/Target/SPIRV/arithmetic-ops.mlir
    mlir/test/Target/SPIRV/array.mlir
    mlir/test/Target/SPIRV/atomic-ops.mlir
    mlir/test/Target/SPIRV/barrier-ops.mlir
    mlir/test/Target/SPIRV/bit-ops.mlir
    mlir/test/Target/SPIRV/cast-ops.mlir
    mlir/test/Target/SPIRV/composite-op.mlir
    mlir/test/Target/SPIRV/constant.mlir
    mlir/test/Target/SPIRV/cooperative-matrix-ops.mlir
    mlir/test/Target/SPIRV/debug.mlir
    mlir/test/Target/SPIRV/decorations.mlir
    mlir/test/Target/SPIRV/entry-point.mlir
    mlir/test/Target/SPIRV/execution-mode.mlir
    mlir/test/Target/SPIRV/function-call.mlir
    mlir/test/Target/SPIRV/gl-ops.mlir
    mlir/test/Target/SPIRV/global-variable.mlir
    mlir/test/Target/SPIRV/group-ops.mlir
    mlir/test/Target/SPIRV/image-ops.mlir
    mlir/test/Target/SPIRV/image.mlir
    mlir/test/Target/SPIRV/joint-matrix-ops.mlir
    mlir/test/Target/SPIRV/logical-ops.mlir
    mlir/test/Target/SPIRV/loop.mlir
    mlir/test/Target/SPIRV/matrix.mlir
    mlir/test/Target/SPIRV/memory-ops.mlir
    mlir/test/Target/SPIRV/module.mlir
    mlir/test/Target/SPIRV/non-uniform-ops.mlir
    mlir/test/Target/SPIRV/ocl-ops.mlir
    mlir/test/Target/SPIRV/phi.mlir
    mlir/test/Target/SPIRV/sampled-image.mlir
    mlir/test/Target/SPIRV/selection.mlir
    mlir/test/Target/SPIRV/spec-constant.mlir
    mlir/test/Target/SPIRV/struct.mlir
    mlir/test/Target/SPIRV/terminator.mlir
    mlir/test/Target/SPIRV/undef.mlir
    mlir/test/lib/Dialect/SPIRV/TestAvailability.cpp
    mlir/test/lib/Dialect/SPIRV/TestEntryPointAbi.cpp
    mlir/test/mlir-opt/commandline.mlir
    mlir/test/mlir-spirv-cpu-runner/double.mlir
    mlir/test/mlir-spirv-cpu-runner/simple_add.mlir
    mlir/test/mlir-vulkan-runner/addf.mlir
    mlir/test/mlir-vulkan-runner/addi.mlir
    mlir/test/mlir-vulkan-runner/addi8.mlir
    mlir/test/mlir-vulkan-runner/mulf.mlir
    mlir/test/mlir-vulkan-runner/subf.mlir
    mlir/test/mlir-vulkan-runner/time.mlir
    mlir/unittests/Dialect/SPIRV/DeserializationTest.cpp

Removed: 
    


################################################################################
diff --git a/mlir/docs/DialectConversion.md b/mlir/docs/DialectConversion.md
index 9f9b95de879f..3de7a897a3b8 100644
--- a/mlir/docs/DialectConversion.md
+++ b/mlir/docs/DialectConversion.md
@@ -431,12 +431,12 @@ Legalizing operation : 'func.return'(0x608000002e20) {
   } -> FAILURE : unable to fold
 
   * Pattern : 'func.return -> ()' {
-    ** Insert  : 'spv.Return'(0x6070000453e0)
+    ** Insert  : 'spirv.Return'(0x6070000453e0)
     ** Replace : 'func.return'(0x608000002e20)
 
     //===-------------------------------------------===//
-    Legalizing operation : 'spv.Return'(0x6070000453e0) {
-      "spv.Return"() : () -> ()
+    Legalizing operation : 'spirv.Return'(0x6070000453e0) {
+      "spirv.Return"() : () -> ()
 
     } -> SUCCESS : operation marked legal by the target
     //===-------------------------------------------===//
@@ -448,5 +448,5 @@ Legalizing operation : 'func.return'(0x608000002e20) {
 This output is describing the legalization of an `func.return` operation. We
 first try to legalize by folding the operation, but that is unsuccessful for
 `func.return`. From there, a pattern is applied that replaces the `func.return`
-with a `spv.Return`. The newly generated `spv.Return` is then processed for
+with a `spirv.Return`. The newly generated `spirv.Return` is then processed for
legalization, but is found to already be legal as per the target.

diff --git a/mlir/docs/Dialects/SPIR-V.md b/mlir/docs/Dialects/SPIR-V.md
index ec9a2d8ac365..b770498c33ed 100644
--- a/mlir/docs/Dialects/SPIR-V.md
+++ b/mlir/docs/Dialects/SPIR-V.md
@@ -76,36 +76,36 @@ extensible as SPIR-V specification.
 
 The SPIR-V dialect adopts the following conventions for IR:
 
-*   The prefix for all SPIR-V types and operations are `spv.`.
+*   The prefix for all SPIR-V types and operations are `spirv.`.
 *   All instructions in an extended instruction set are further qualified with
     the extended instruction set's prefix. For example, all operations in the
-    GLSL extended instruction set have the prefix of `spv.GL.`.
+    GLSL extended instruction set have the prefix of `spirv.GL.`.
 *   Ops that directly mirror instructions in the specification have `CamelCase`
     names that are the same as the instruction opnames (without the `Op`
-    prefix). For example, `spv.FMul` is a direct mirror of `OpFMul` in the
+    prefix). For example, `spirv.FMul` is a direct mirror of `OpFMul` in the
     specification. Such an op will be serialized into and deserialized from one
     SPIR-V instruction.
 *   Ops with `snake_case` names are those that have different representation
     from corresponding instructions (or concepts) in the specification. These
-    ops are mostly for defining the SPIR-V structure. For example, `spv.module`
-    and `spv.Constant`. They may correspond to one or more instructions during
+    ops are mostly for defining the SPIR-V structure. For example, `spirv.module`
+    and `spirv.Constant`. They may correspond to one or more instructions during
     (de)serialization.
 *   Ops with `mlir.snake_case` names are those that have no corresponding
     instructions (or concepts) in the binary format. They are introduced to
-    satisfy MLIR structural requirements. For example, `spv.mlir.merge`. They
+    satisfy MLIR structural requirements. For example, `spirv.mlir.merge`. They
     map to no instructions during (de)serialization.
 
-(TODO: consider merging the last two cases and adopting `spv.mlir.` prefix for
+(TODO: consider merging the last two cases and adopting `spirv.mlir.` prefix for
 them.)
 
 ## Module
 
-A SPIR-V module is defined via the `spv.module` op, which has one region that
+A SPIR-V module is defined via the `spirv.module` op, which has one region that
 contains one block. Model-level instructions, including function definitions,
 are all placed inside the block. Functions are defined using the builtin `func`
 op.
 
-We choose to model a SPIR-V module with a dedicated `spv.module` op based on the
+We choose to model a SPIR-V module with a dedicated `spirv.module` op based on the
 following considerations:
 
 *   It maps cleanly to a SPIR-V module in the specification.
@@ -114,11 +114,11 @@ following considerations:
 *   We can attach additional model-level attributes.
 *   We can control custom assembly form.
 
-The `spv.module` op's region cannot capture SSA values from outside, neither
-implicitly nor explicitly. The `spv.module` op's region is closed as to what ops
+The `spirv.module` op's region cannot capture SSA values from outside, neither
+implicitly nor explicitly. The `spirv.module` op's region is closed as to what ops
 can appear inside: apart from the builtin `func` op, it can only contain ops
-from the SPIR-V dialect. The `spv.module` op's verifier enforces this rule. This
-meaningfully guarantees that a `spv.module` can be the entry point and boundary
+from the SPIR-V dialect. The `spirv.module` op's verifier enforces this rule. This
+meaningfully guarantees that a `spirv.module` can be the entry point and boundary
 for serialization.
 
 ### Module-level operations
@@ -148,7 +148,7 @@ instructions are represented in the SPIR-V dialect:
 #### Use MLIR attributes for metadata
 
 *   Requirements for capabilities, extensions, extended instruction sets,
-    addressing model, and memory model are conveyed using `spv.module`
+    addressing model, and memory model are conveyed using `spirv.module`
     attributes. This is considered better because these information are for the
     execution environment. It's easier to probe them if on the module op itself.
 *   Annotations/decoration instructions are "folded" into the instructions they
@@ -166,23 +166,23 @@ instructions are represented in the SPIR-V dialect:
 #### Unify and localize constants
 
 *   Various normal constant instructions are represented by the same
-    `spv.Constant` op. Those instructions are just for constants of different
+    `spirv.Constant` op. Those instructions are just for constants of different
     types; using one op to represent them reduces IR verbosity and makes
     transformations less tedious.
-*   Normal constants are not placed in `spv.module`'s region; they are localized
+*   Normal constants are not placed in `spirv.module`'s region; they are localized
     into functions. This is to make functions in the SPIR-V dialect to be
     isolated and explicit capturing. Constants are cheap to duplicate given
     attributes are made unique in `MLIRContext`.
 
 #### Adopt symbol-based global variables and specialization constant
 
-*   Global variables are defined with the `spv.GlobalVariable` op. They do not
+*   Global variables are defined with the `spirv.GlobalVariable` op. They do not
     generate SSA values. Instead they have symbols and should be referenced via
-    symbols. To use global variables in a function block, `spv.mlir.addressof` is
+    symbols. To use global variables in a function block, `spirv.mlir.addressof` is
     needed to turn the symbol into an SSA value.
-*   Specialization constants are defined with the `spv.SpecConstant` op. Similar
+*   Specialization constants are defined with the `spirv.SpecConstant` op. Similar
     to global variables, they do not generate SSA values and have symbols for
-    reference, too. `spv.mlir.referenceof` is needed to turn the symbol into an SSA
+    reference, too. `spirv.mlir.referenceof` is needed to turn the symbol into an SSA
     value for use in a function block.
 
 The above choices enables functions in the SPIR-V dialect to be isolated and
@@ -200,10 +200,10 @@ explicit capturing.
 
 *   A SPIR-V module can have multiple entry points. And these entry points refer
     to the function and interface variables. It’s not suitable to model them as
-    `spv.module` op attributes. We can model them as normal ops of using symbol
+    `spirv.module` op attributes. We can model them as normal ops of using symbol
     references.
 *   Similarly for execution modes, which are coupled with entry points, we can
-    model them as normal ops in `spv.module`'s region.
+    model them as normal ops in `spirv.module`'s region.
 
 ## Decorations
 
@@ -226,8 +226,8 @@ OpDecorate %v2 NoContraction
 We can represent them in the SPIR-V dialect as:
 
 ```mlir
-%v1 = "spv.FMul"(%0, %0) {RelaxedPrecision: unit} : (f32, f32) -> (f32)
-%v2 = "spv.FMul"(%1, %1) {NoContraction: unit} : (f32, f32) -> (f32)
+%v1 = "spirv.FMul"(%0, %0) {RelaxedPrecision: unit} : (f32, f32) -> (f32)
+%v2 = "spirv.FMul"(%1, %1) {NoContraction: unit} : (f32, f32) -> (f32)
 ```
 
 This approach benefits transformations. Essentially those decorations are just
@@ -288,16 +288,16 @@ element-type ::= integer-type
                | vector-type
                | spirv-type
 
-array-type ::= `!spv.array` `<` integer-literal `x` element-type
+array-type ::= `!spirv.array` `<` integer-literal `x` element-type
                (`,` `stride` `=` integer-literal)? `>`
 ```
 
 For example,
 
 ```mlir
-!spv.array<4 x i32>
-!spv.array<4 x i32, stride = 4>
-!spv.array<16 x vector<4 x f32>>
+!spirv.array<4 x i32>
+!spirv.array<4 x i32, stride = 4>
+!spirv.array<16 x vector<4 x f32>>
 ```
 
 ### Image type
@@ -317,7 +317,7 @@ sampler-use-info ::= `SamplerUnknown` | `NeedSampler` | `NoSampler`
 
 format ::= `Unknown` | `Rgba32f` | <and other SPIR-V Image Formats...>
 
-image-type ::= `!spv.image<` element-type `,` dim `,` depth-info `,`
+image-type ::= `!spirv.image<` element-type `,` dim `,` depth-info `,`
                            arrayed-info `,` sampling-info `,`
                            sampler-use-info `,` format `>`
 ```
@@ -325,8 +325,8 @@ image-type ::= `!spv.image<` element-type `,` dim `,` depth-info `,`
 For example,
 
 ```mlir
-!spv.image<f32, 1D, NoDepth, NonArrayed, SingleSampled, SamplerUnknown, Unknown>
-!spv.image<f32, Cube, IsDepth, Arrayed, MultiSampled, NeedSampler, Rgba32f>
+!spirv.image<f32, 1D, NoDepth, NonArrayed, SingleSampled, SamplerUnknown, Unknown>
+!spirv.image<f32, Cube, IsDepth, Arrayed, MultiSampled, NeedSampler, Rgba32f>
 ```
 
 ### Pointer type
@@ -339,14 +339,14 @@ storage-class ::= `UniformConstant`
                 | `Workgroup`
                 | <and other storage classes...>
 
-pointer-type ::= `!spv.ptr<` element-type `,` storage-class `>`
+pointer-type ::= `!spirv.ptr<` element-type `,` storage-class `>`
 ```
 
 For example,
 
 ```mlir
-!spv.ptr<i32, Function>
-!spv.ptr<vector<4 x f32>, Uniform>
+!spirv.ptr<i32, Function>
+!spirv.ptr<vector<4 x f32>, Uniform>
 ```
 
 ### Runtime array type
@@ -354,22 +354,22 @@ For example,
 This corresponds to SPIR-V [runtime array type][RuntimeArrayType]. Its syntax is
 
 ```
-runtime-array-type ::= `!spv.rtarray` `<` element-type (`,` `stride` `=` integer-literal)? `>`
+runtime-array-type ::= `!spirv.rtarray` `<` element-type (`,` `stride` `=` integer-literal)? `>`
 ```
 
 For example,
 
 ```mlir
-!spv.rtarray<i32>
-!spv.rtarray<i32, stride=4>
-!spv.rtarray<vector<4 x f32>>
+!spirv.rtarray<i32>
+!spirv.rtarray<i32, stride=4>
+!spirv.rtarray<vector<4 x f32>>
 ```
 ### Sampled image type
 
 This corresponds to SPIR-V [sampled image type][SampledImageType]. Its syntax is
 
 ```
-sampled-image-type ::= `!spv.sampled_image<!spv.image<` element-type `,` dim `,` depth-info `,`
+sampled-image-type ::= `!spirv.sampled_image<!spirv.image<` element-type `,` dim `,` depth-info `,`
                                                         arrayed-info `,` sampling-info `,`
                                                         sampler-use-info `,` format `>>`
 ```
@@ -377,8 +377,8 @@ sampled-image-type ::= `!spv.sampled_image<!spv.image<` element-type `,` dim `,`
 For example,
 
 ```mlir
-!spv.sampled_image<!spv.image<f32, Dim1D, NoDepth, NonArrayed, SingleSampled, NoSampler, Unknown>>
-!spv.sampled_image<!spv.image<i32, Rect, DepthUnknown, Arrayed, MultiSampled, NeedSampler, R8ui>>
+!spirv.sampled_image<!spirv.image<f32, Dim1D, NoDepth, NonArrayed, SingleSampled, NoSampler, Unknown>>
+!spirv.sampled_image<!spirv.image<i32, Rect, DepthUnknown, Arrayed, MultiSampled, NeedSampler, R8ui>>
 ```
 
 ### Struct type
@@ -387,17 +387,17 @@ This corresponds to SPIR-V [struct type][StructType]. Its syntax is
 
 ```
 struct-member-decoration ::= integer-literal? spirv-decoration*
-struct-type ::= `!spv.struct<` spirv-type (`[` struct-member-decoration `]`)?
+struct-type ::= `!spirv.struct<` spirv-type (`[` struct-member-decoration `]`)?
                      (`, ` spirv-type (`[` struct-member-decoration `]`)?
 ```
 
 For Example,
 
 ```mlir
-!spv.struct<f32>
-!spv.struct<f32 [0]>
-!spv.struct<f32, !spv.image<f32, 1D, NoDepth, NonArrayed, SingleSampled, SamplerUnknown, Unknown>>
-!spv.struct<f32 [0], i32 [4]>
+!spirv.struct<f32>
+!spirv.struct<f32 [0]>
+!spirv.struct<f32, !spirv.image<f32, 1D, NoDepth, NonArrayed, SingleSampled, SamplerUnknown, Unknown>>
+!spirv.struct<f32 [0], i32 [4]>
 ```
 
 ## Function
@@ -423,12 +423,12 @@ more concisely:
 
 ```mlir
 func.func @f(%arg: i32) -> i32 {
-  "spv.ReturnValue"(%arg) : (i32) -> (i32)
+  "spirv.ReturnValue"(%arg) : (i32) -> (i32)
 }
 ```
 
 A SPIR-V function can have at most one result. It cannot contain nested
-functions or non-SPIR-V operations. `spv.module` verifies these requirements.
+functions or non-SPIR-V operations. `spirv.module` verifies these requirements.
 
 A major difference between the SPIR-V dialect and the SPIR-V specification for
 functions is that the former are isolated and require explicit capturing, while
@@ -442,7 +442,7 @@ infrastructure require ops to be isolated, e.g., the
 [greedy pattern rewriter][GreedyPatternRewriter] can only act on ops isolated
 from above.
 
-(TODO: create a dedicated `spv.fn` op for SPIR-V functions.)
+(TODO: create a dedicated `spirv.fn` op for SPIR-V functions.)
 
 ## Operations
 
@@ -475,9 +475,9 @@ For example,
 can be represented in the dialect as
 
 ```mlir
-%0 = "spv.Constant"() { value = 42 : i32 } : () -> i32
-%1 = "spv.Variable"(%0) { storage_class = "Function" } : (i32) -> !spv.ptr<i32, Function>
-%2 = "spv.IAdd"(%0, %0) : (i32, i32) -> i32
+%0 = "spirv.Constant"() { value = 42 : i32 } : () -> i32
+%1 = "spirv.Variable"(%0) { storage_class = "Function" } : (i32) -> !spirv.ptr<i32, Function>
+%2 = "spirv.IAdd"(%0, %0) : (i32, i32) -> i32
 ```
 
 Operation documentation is written in each op's Op Definition Spec using
@@ -508,8 +508,8 @@ proper name prefix. For example, for
 we can have
 
 ```mlir
-%1 = "spv.GL.Log"(%cst) : (f32) -> (f32)
-%2 = "spv.GL.Sqrt"(%cst) : (f32) -> (f32)
+%1 = "spirv.GL.Log"(%cst) : (f32) -> (f32)
+%2 = "spirv.GL.Sqrt"(%cst) : (f32) -> (f32)
 ```
 
 ## Control Flow
@@ -525,21 +525,21 @@ control flow construct. With this approach, it's easier to discover all blocks
 belonging to a structured control flow construct. It is also more idiomatic to
 MLIR system.
 
-We introduce a `spv.mlir.selection` and `spv.mlir.loop` op for structured selections and
+We introduce a `spirv.mlir.selection` and `spirv.mlir.loop` op for structured selections and
 loops, respectively. The merge targets are the next ops following them. Inside
-their regions, a special terminator, `spv.mlir.merge` is introduced for branching to
+their regions, a special terminator, `spirv.mlir.merge` is introduced for branching to
 the merge target.
 
 ### Selection
 
-`spv.mlir.selection` defines a selection construct. It contains one region. The
+`spirv.mlir.selection` defines a selection construct. It contains one region. The
 region should contain at least two blocks: one selection header block and one
 merge block.
 
 *   The selection header block should be the first block. It should contain the
-    `spv.BranchConditional` or `spv.Switch` op.
+    `spirv.BranchConditional` or `spirv.Switch` op.
 *   The merge block should be the last block. The merge block should only
-    contain a `spv.mlir.merge` op. Any block can branch to the merge block for early
+    contain a `spirv.mlir.merge` op. Any block can branch to the merge block for early
     exit.
 
 ```
@@ -581,24 +581,24 @@ It will be represented as
 
 ```mlir
 func.func @selection(%cond: i1) -> () {
-  %zero = spv.Constant 0: i32
-  %one = spv.Constant 1: i32
-  %two = spv.Constant 2: i32
-  %x = spv.Variable init(%zero) : !spv.ptr<i32, Function>
+  %zero = spirv.Constant 0: i32
+  %one = spirv.Constant 1: i32
+  %two = spirv.Constant 2: i32
+  %x = spirv.Variable init(%zero) : !spirv.ptr<i32, Function>
 
-  spv.mlir.selection {
-    spv.BranchConditional %cond, ^then, ^else
+  spirv.mlir.selection {
+    spirv.BranchConditional %cond, ^then, ^else
 
   ^then:
-    spv.Store "Function" %x, %one : i32
-    spv.Branch ^merge
+    spirv.Store "Function" %x, %one : i32
+    spirv.Branch ^merge
 
   ^else:
-    spv.Store "Function" %x, %two : i32
-    spv.Branch ^merge
+    spirv.Store "Function" %x, %two : i32
+    spirv.Branch ^merge
 
   ^merge:
-    spv.mlir.merge
+    spirv.mlir.merge
   }
 
   // ...
@@ -608,14 +608,14 @@ func.func @selection(%cond: i1) -> () {
 
 ### Loop
 
-`spv.mlir.loop` defines a loop construct. It contains one region. The region should
+`spirv.mlir.loop` defines a loop construct. It contains one region. The region should
 contain at least four blocks: one entry block, one loop header block, one loop
 continue block, one merge block.
 
 *   The entry block should be the first block and it should jump to the loop
     header block, which is the second block.
 *   The merge block should be the last block. The merge block should only
-    contain a `spv.mlir.merge` op. Any block except the entry block can branch to
+    contain a `spirv.mlir.merge` op. Any block except the entry block can branch to
     the merge block for early exit.
 *   The continue block should be the second to last block and it should have a
     branch to the loop header block.
@@ -669,30 +669,30 @@ It will be represented as
 
 ```mlir
 func.func @loop(%count : i32) -> () {
-  %zero = spv.Constant 0: i32
-  %one = spv.Constant 1: i32
-  %var = spv.Variable init(%zero) : !spv.ptr<i32, Function>
+  %zero = spirv.Constant 0: i32
+  %one = spirv.Constant 1: i32
+  %var = spirv.Variable init(%zero) : !spirv.ptr<i32, Function>
 
-  spv.mlir.loop {
-    spv.Branch ^header
+  spirv.mlir.loop {
+    spirv.Branch ^header
 
   ^header:
-    %val0 = spv.Load "Function" %var : i32
-    %cmp = spv.SLessThan %val0, %count : i32
-    spv.BranchConditional %cmp, ^body, ^merge
+    %val0 = spirv.Load "Function" %var : i32
+    %cmp = spirv.SLessThan %val0, %count : i32
+    spirv.BranchConditional %cmp, ^body, ^merge
 
   ^body:
     // ...
-    spv.Branch ^continue
+    spirv.Branch ^continue
 
   ^continue:
-    %val1 = spv.Load "Function" %var : i32
-    %add = spv.IAdd %val1, %one : i32
-    spv.Store "Function" %var, %add : i32
-    spv.Branch ^header
+    %val1 = spirv.Load "Function" %var : i32
+    %add = spirv.IAdd %val1, %one : i32
+    spirv.Store "Function" %var, %add : i32
+    spirv.Branch ^header
 
   ^merge:
-    spv.mlir.merge
+    spirv.mlir.merge
   }
   return
 }
@@ -729,28 +729,28 @@ It will be represented as:
 
 ```mlir
 func.func @foo() -> () {
-  %var = spv.Variable : !spv.ptr<i32, Function>
+  %var = spirv.Variable : !spirv.ptr<i32, Function>
 
-  spv.mlir.selection {
-    %true = spv.Constant true
-    spv.BranchConditional %true, ^true, ^false
+  spirv.mlir.selection {
+    %true = spirv.Constant true
+    spirv.BranchConditional %true, ^true, ^false
 
   ^true:
-    %zero = spv.Constant 0 : i32
-    spv.Branch ^phi(%zero: i32)
+    %zero = spirv.Constant 0 : i32
+    spirv.Branch ^phi(%zero: i32)
 
   ^false:
-    %one = spv.Constant 1 : i32
-    spv.Branch ^phi(%one: i32)
+    %one = spirv.Constant 1 : i32
+    spirv.Branch ^phi(%one: i32)
 
   ^phi(%arg: i32):
-    spv.Store "Function" %var, %arg : i32
-    spv.Return
+    spirv.Store "Function" %var, %arg : i32
+    spirv.Return
 
   ^merge:
-    spv.mlir.merge
+    spirv.mlir.merge
   }
-  spv.Return
+  spirv.Return
 }
 ```
 
@@ -778,7 +778,7 @@ implement the above interfaces.
 SPIR-V ops' availability implementation methods are automatically synthesized
 from the availability specification on each op and enum attribute in TableGen.
 An op needs to look into not only the opcode but also operands to derive its
-availability requirements. For example, `spv.ControlBarrier` requires no
+availability requirements. For example, `spirv.ControlBarrier` requires no
 special capability if the execution scope is `Subgroup`, but it will require
 the `VulkanMemoryModel` capability if the scope is `QueueFamily`.
 
@@ -801,8 +801,8 @@ instructions.
 
 SPIR-V compilation should also take into consideration of the execution
 environment, so we generate SPIR-V modules valid for the target environment.
-This is conveyed by the `spv.target_env` (`spirv::TargetEnvAttr`) attribute. It
-should be of `#spv.target_env` attribute kind, which is defined as:
+This is conveyed by the `spirv.target_env` (`spirv::TargetEnvAttr`) attribute. It
+should be of `#spirv.target_env` attribute kind, which is defined as:
 
 ```
 spirv-version    ::= `v1.0` | `v1.1` | ...
@@ -817,7 +817,7 @@ spirv-capability-elements ::= spirv-capability (`,` spirv-capability)*
 
 spirv-resource-limits ::= dictionary-attribute
 
-spirv-vce-attribute ::= `#` `spv.vce` `<`
+spirv-vce-attribute ::= `#` `spirv.vce` `<`
                             spirv-version `,`
                             spirv-capability-list `,`
                             spirv-extensions-list `>`
@@ -827,7 +827,7 @@ spirv-device-type ::= `DiscreteGPU` | `IntegratedGPU` | `CPU` | ...
 spirv-device-id ::= integer-literal
 spirv-device-info ::= spirv-vendor-id (`:` spirv-device-type (`:` spirv-device-id)?)?
 
-spirv-target-env-attribute ::= `#` `spv.target_env` `<`
+spirv-target-env-attribute ::= `#` `spirv.target_env` `<`
                                   spirv-vce-attribute,
                                   (spirv-device-info `,`)?
                                   spirv-resource-limits `>`
@@ -835,7 +835,7 @@ spirv-target-env-attribute ::= `#` `spv.target_env` `<`
 
 The attribute has a few fields:
 
-*   A `#spv.vce` (`spirv::VerCapExtAttr`) attribute:
+*   A `#spirv.vce` (`spirv::VerCapExtAttr`) attribute:
     *   The target SPIR-V version.
     *   A list of SPIR-V extensions for the target.
     *   A list of SPIR-V capabilities for the target.
@@ -848,8 +848,8 @@ For example,
 
 ```
 module attributes {
-spv.target_env = #spv.target_env<
-    #spv.vce<v1.3, [Shader, GroupNonUniform], [SPV_KHR_8bit_storage]>,
+spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.3, [Shader, GroupNonUniform], [SPV_KHR_8bit_storage]>,
     ARM:IntegratedGPU,
     {
       max_compute_workgroup_invocations = 128 : i32,
@@ -858,7 +858,7 @@ spv.target_env = #spv.target_env<
 } { ... }
 ```
 
-Dialect conversion framework will utilize the information in `spv.target_env` to
+Dialect conversion framework will utilize the information in `spirv.target_env` to
 properly filter out patterns and ops not available in the target execution
 environment. When targeting SPIR-V, one needs to create a
 [`SPIRVConversionTarget`](#spirvconversiontarget) by providing such an
@@ -904,26 +904,26 @@ to and guide the SPIR-V compilation path.
 The SPIR-V dialect defines [a few attributes][MlirSpirvAbi] for specifying these
 interfaces:
 
-*   `spv.entry_point_abi` is a struct attribute that should be attached to the
+*   `spirv.entry_point_abi` is a struct attribute that should be attached to the
     entry function. It contains:
     *   `local_size` for specifying the local work group size for the dispatch.
-*   `spv.interface_var_abi` is attribute that should be attached to each operand
-    and result of the entry function. It should be of `#spv.interface_var_abi`
+*   `spirv.interface_var_abi` is attribute that should be attached to each operand
+    and result of the entry function. It should be of `#spirv.interface_var_abi`
     attribute kind, which is defined as:
 
 ```
 spv-storage-class     ::= `StorageBuffer` | ...
 spv-descriptor-set    ::= integer-literal
 spv-binding           ::= integer-literal
-spv-interface-var-abi ::= `#` `spv.interface_var_abi` `<(` spv-descriptor-set
+spv-interface-var-abi ::= `#` `spirv.interface_var_abi` `<(` spv-descriptor-set
                           `,` spv-binding `)` (`,` spv-storage-class)? `>`
 ```
 
 For example,
 
 ```
-#spv.interface_var_abi<(0, 0), StorageBuffer>
-#spv.interface_var_abi<(0, 1)>
+#spirv.interface_var_abi<(0, 0), StorageBuffer>
+#spirv.interface_var_abi<(0, 1)>
 ```
 
 The attribute has a few fields:
@@ -936,11 +936,11 @@ The SPIR-V dialect provides a [`LowerABIAttributesPass`][MlirSpirvPasses] that
 uses this information to lower the entry point function and its ABI consistent
 with the Vulkan validation rules. Specifically,
 
-*   Creates `spv.GlobalVariable`s for the arguments, and replaces all uses of
+*   Creates `spirv.GlobalVariable`s for the arguments, and replaces all uses of
     the argument with this variable. The SSA value used for replacement is
-    obtained using the `spv.mlir.addressof` operation.
-*   Adds the `spv.EntryPoint` and `spv.ExecutionMode` operations into the
-    `spv.module` for the entry function.
+    obtained using the `spirv.mlir.addressof` operation.
+*   Adds the `spirv.EntryPoint` and `spirv.ExecutionMode` operations into the
+    `spirv.module` for the entry function.
 
 ## Serialization and deserialization
 
@@ -966,16 +966,16 @@ assembler/disassembler in the [SPIRV-Tools][SpirvTools] project.
 A few transformations are performed in the process of serialization because of
 the representational 
differences between SPIR-V dialect and binary format:
 
-*   Attributes on `spv.module` are emitted as their corresponding SPIR-V
+*   Attributes on `spirv.module` are emitted as their corresponding SPIR-V
     instructions.
 *   Types are serialized into `OpType*` instructions in the SPIR-V binary module
     section for types, constants, and global variables.
-*   `spv.Constant`s are unified and placed in the SPIR-V binary module section
+*   `spirv.Constant`s are unified and placed in the SPIR-V binary module section
     for types, constants, and global variables.
 *   Attributes on ops, if not part of the op's binary encoding, are emitted as
     `OpDecorate*` instructions in the SPIR-V binary module section for
     decorations.
-*   `spv.mlir.selection`s and `spv.mlir.loop`s are emitted as basic blocks with `Op*Merge`
+*   `spirv.mlir.selection`s and `spirv.mlir.loop`s are emitted as basic blocks with `Op*Merge`
     instructions in the header block as required by the binary format.
 *   Block arguments are materialized as `OpPhi` instructions at the beginning of
     the corresponding blocks.
@@ -984,20 +984,20 @@ Similarly, a few transformations are performed during deserialization:
 
 *   Instructions for execution environment requirements (extensions,
     capabilities, extended instruction sets, etc.) will be placed as attributes
-    on `spv.module`.
+    on `spirv.module`.
 *   `OpType*` instructions will be converted into proper `mlir::Type`s.
-*   `OpConstant*` instructions are materialized as `spv.Constant` at each use
+*   `OpConstant*` instructions are materialized as `spirv.Constant` at each use
     site.
-*   `OpVariable` instructions will be converted to `spv.GlobalVariable` ops if
-    in module-level; otherwise they will be converted into `spv.Variable` ops.
+*   `OpVariable` instructions will be converted to `spirv.GlobalVariable` ops if
+    in module-level; otherwise they will be converted into `spirv.Variable` ops.
 *   Every use of a module-level `OpVariable` instruction will materialize a
-    `spv.mlir.addressof` op to turn the symbol of the corresponding
-    `spv.GlobalVariable` into an SSA value.
+    `spirv.mlir.addressof` op to turn the symbol of the corresponding
+    `spirv.GlobalVariable` into an SSA value.
 *   Every use of a `OpSpecConstant` instruction will materialize a
-    `spv.mlir.referenceof` op to turn the symbol of the corresponding
-    `spv.SpecConstant` into an SSA value.
+    `spirv.mlir.referenceof` op to turn the symbol of the corresponding
+    `spirv.SpecConstant` into an SSA value.
 *   `OpPhi` instructions are converted to block arguments.
-*   Structured control flow are placed inside `spv.mlir.selection` and `spv.mlir.loop`.
+*   Structured control flow are placed inside `spirv.mlir.selection` and `spirv.mlir.loop`.
 
 ## Conversions
 
@@ -1017,12 +1017,12 @@ lowering described below implements both these requirements.)
 
 The `mlir::spirv::SPIRVConversionTarget` class derives from the
 `mlir::ConversionTarget` class and serves as a utility to define a conversion
-target satisfying a given [`spv.target_env`](#target-environment). It registers
+target satisfying a given [`spirv.target_env`](#target-environment). It registers
 proper hooks to check the dynamic legality of SPIR-V ops. Users can further
 register other legality constraints into the returned `SPIRVConversionTarget`.
 
 `spirv::lookupTargetEnvOrDefault()` is a handy utility function to query an
-`spv.target_env` attached in the input IR or use the default to construct a
+`spirv.target_env` attached in the input IR or use the default to construct a
 `SPIRVConversionTarget`.
 
 ### `SPIRVTypeConverter`
@@ -1052,7 +1052,7 @@ directly.
 (TODO: Convert other vectors of lengths to scalars or arrays)
 
 [Builtin memref types][MlirMemrefType] with static shape and stride are
-converted to `spv.ptr<spv.struct<spv.array<...>>>`s. The resultant SPIR-V array
+converted to `spirv.ptr<spirv.struct<spirv.array<...>>>`s. The resultant SPIR-V array
 types have the same element type as the source memref and its number of elements
 is obtained from the layout specification of the memref. The storage class of
 the pointer type are derived from the memref's memory space with
@@ -1063,24 +1063,24 @@ the pointer type are derived from the memref's memory space with
 #### Setting layout for shader interface variables
 
 SPIR-V validation rules for shaders require composite objects to be explicitly
-laid out. If a `spv.GlobalVariable` is not explicitly laid out, the utility
+laid out. If a `spirv.GlobalVariable` is not explicitly laid out, the utility
 method `mlir::spirv::decorateType` implements a layout consistent with
 the [Vulkan shader requirements][VulkanShaderInterface].
 
 #### Creating builtin variables
 
-In SPIR-V dialect, builtins are represented using `spv.GlobalVariable`s, with
-`spv.mlir.addressof` used to get a handle to the builtin as an SSA value.  The
-method `mlir::spirv::getBuiltinVariableValue` creates a `spv.GlobalVariable` for
-the builtin in the current `spv.module` if it does not exist already, and
-returns an SSA value generated from an `spv.mlir.addressof` operation.
+In SPIR-V dialect, builtins are represented using `spirv.GlobalVariable`s, with
+`spirv.mlir.addressof` used to get a handle to the builtin as an SSA value.  The
+method `mlir::spirv::getBuiltinVariableValue` creates a `spirv.GlobalVariable` for
+the builtin in the current `spirv.module` if it does not exist already, and
+returns an SSA value generated from an `spirv.mlir.addressof` operation.
 
 ### Current conversions to SPIR-V
 
 Using the above infrastructure, conversions are implemented from
 
 *   [Arithmetic Dialect][MlirArithmeticDialect]
-*   [GPU Dialect][MlirGpuDialect] : A gpu.module is converted to a `spv.module`.
+*   [GPU Dialect][MlirGpuDialect] : A gpu.module is converted to a `spirv.module`.
     A gpu.function within this module is lowered as an entry function.
 
 ## Code organization
@@ -1160,7 +1160,7 @@ These common utilities are implemented in the `MLIRSPIRVConversion` and
 
 ## Rationale
 
-### Lowering `memref`s to `!spv.array<..>` and `!spv.rtarray<..>`.
+### Lowering `memref`s to `!spirv.array<..>` and `!spirv.rtarray<..>`.
 
 The LLVM dialect lowers `memref` types to a `MemrefDescriptor`:
 
@@ -1177,8 +1177,8 @@ struct MemrefDescriptor {
 ```
 
 In SPIR-V dialect, we chose not to use a `MemrefDescriptor`. Instead a `memref`
-is lowered directly to a `!spv.ptr<!spv.array<nelts x elem_type>>` when the
-`memref` is statically shaped, and `!spv.ptr<!spv.rtarray<elem_type>>` when the
+is lowered directly to a `!spirv.ptr<!spirv.array<nelts x elem_type>>` when the
+`memref` is statically shaped, and `!spirv.ptr<!spirv.rtarray<elem_type>>` when the
 `memref` is dynamically shaped. The rationale behind this choice is described
 below.
 
@@ -1198,7 +1198,7 @@ below.
    Vulkan-capable device we can target; basically ruling out mobile support.
 
 1.  An alternative to having one level of indirection (as is the case with
-    `MemrefDescriptor`s), is to embed the `!spv.array` or `!spv.rtarray`
+    `MemrefDescriptor`s), is to embed the `!spirv.array` or `!spirv.rtarray`
     directly in the `MemrefDescriptor`, Having such a descriptor at the ABI
     boundary implies that the first few bytes of the input/output buffers would
     need to be reserved for shape/stride information. This adds an unnecessary
@@ -1364,7 +1364,7 @@ conversion as well, the pattern must inherit from the
 `mlir::spirv::SPIRVTypeConverter`.  If the operation has a region, [signature
 conversion][MlirDialectConversionSignatureConversion] might be needed as well.
 
-**Note**: The current validation rules of `spv.module` require that all
+**Note**: The current validation rules of `spirv.module` require that all
 operations contained within its region are valid operations in the SPIR-V
 dialect.
 

diff  --git a/mlir/docs/PassManagement.md b/mlir/docs/PassManagement.md
index 409a15e27669..0fe78510bcc3 100644
--- a/mlir/docs/PassManagement.md
+++ b/mlir/docs/PassManagement.md
@@ -379,7 +379,7 @@ For example, the following `.mlir`:
 
 ```mlir
 module {
-  spv.module "Logical" "GLSL450" {
+  spirv.module "Logical" "GLSL450" {
     func @foo() {
       ...
     }
@@ -391,8 +391,8 @@ Has the nesting structure of:
 
 ```
 `builtin.module`
-  `spv.module`
-    `spv.func`
+  `spirv.module`
+    `spirv.func`
 ```
 
 Below is an example of constructing a pipeline that operates on the above
@@ -419,7 +419,7 @@ OpPassManager &nestedFunctionPM = nestedModulePM.nest<func::FuncOp>();
 nestedFunctionPM.addPass(std::make_unique<MyFunctionPass>());
 
 // Nest an op-agnostic pass manager. This will operate on any viable
-// operation, e.g. func.func, spv.func, spv.module, builtin.module, etc.
+// operation, e.g. func.func, spirv.func, spirv.module, builtin.module, etc.
 OpPassManager &nestedAnyPM = nestedModulePM.nestAny();
 nestedAnyPM.addPass(createCanonicalizePass());
 nestedAnyPM.addPass(createCSEPass());

diff  --git a/mlir/docs/SPIRVToLLVMDialectConversion.md b/mlir/docs/SPIRVToLLVMDialectConversion.md
index fff11f4f363f..7a3bc7c62bd9 100644
--- a/mlir/docs/SPIRVToLLVMDialectConversion.md
+++ b/mlir/docs/SPIRVToLLVMDialectConversion.md
@@ -45,7 +45,7 @@ A SPIR-V pointer also takes a Storage Class. At the moment, conversion does
 
 SPIR-V Dialect                                | LLVM Dialect
 :-------------------------------------------: | :-------------------------:
-`!spv.ptr< <element-type>, <storage-class> >` | `!llvm.ptr<<element-type>>`
+`!spirv.ptr< <element-type>, <storage-class> >` | `!llvm.ptr<<element-type>>`
 
 ### Array types
 
@@ -60,8 +60,8 @@ are also mapped to LLVM array.
 
 SPIR-V Dialect                         | LLVM Dialect
 :------------------------------------: | :-------------------------------------:
-`!spv.array<<count> x <element-type>>` | `!llvm.array<<count> x <element-type>>`
-`!spv.rtarray< <element-type> >`       | `!llvm.array<0 x <element-type>>`
+`!spirv.array<<count> x <element-type>>` | `!llvm.array<<count> x <element-type>>`
+`!spirv.rtarray< <element-type> >`       | `!llvm.array<0 x <element-type>>`
 
 ### Struct types
 
@@ -86,11 +86,11 @@ moment. Hence, we adhere to the following mapping:
     a design would require index recalculation in the conversion of ops that
     involve memory addressing.
 
-Examples of SPIR-V struct conversion are: ```mlir !spv.struct<i8, i32> =>
-!llvm.struct<packed (i8, i32)> !spv.struct<i8 [0], i32 [4]> => !llvm.struct<(i8,
+Examples of SPIR-V struct conversion are: ```mlir !spirv.struct<i8, i32> =>
+!llvm.struct<packed (i8, i32)> !spirv.struct<i8 [0], i32 [4]> => !llvm.struct<(i8,
 i32)>
 
-// error !spv.struct<i8 [0], i32 [8]> ```
+// error !spirv.struct<i8 [0], i32 [8]> ```
 
 ### Not implemented types
 
@@ -110,23 +110,23 @@ case-by-case basis.
 ### Arithmetic ops
 
 SPIR-V arithmetic ops mostly have a direct equivalent in LLVM Dialect. Such
-exceptions as `spv.SMod` and `spv.FMod` are rare.
+exceptions as `spirv.SMod` and `spirv.FMod` are rare.
 
 SPIR-V Dialect op | LLVM Dialect op
 :---------------: | :-------------:
-`spv.FAdd`        | `llvm.fadd`
-`spv.FDiv`        | `llvm.fdiv`
-`spv.FNegate`     | `llvm.fneg`
-`spv.FMul`        | `llvm.fmul`
-`spv.FRem`        | `llvm.frem`
-`spv.FSub`        | `llvm.fsub`
-`spv.IAdd`        | `llvm.add`
-`spv.IMul`        | `llvm.mul`
-`spv.ISub`        | `llvm.sub`
-`spv.SDiv`        | `llvm.sdiv`
-`spv.SRem`        | `llvm.srem`
-`spv.UDiv`        | `llvm.udiv`
-`spv.UMod`        | `llvm.urem`
+`spirv.FAdd`        | `llvm.fadd`
+`spirv.FDiv`        | `llvm.fdiv`
+`spirv.FNegate`     | `llvm.fneg`
+`spirv.FMul`        | `llvm.fmul`
+`spirv.FRem`        | `llvm.frem`
+`spirv.FSub`        | `llvm.fsub`
+`spirv.IAdd`        | `llvm.add`
+`spirv.IMul`        | `llvm.mul`
+`spirv.ISub`        | `llvm.sub`
+`spirv.SDiv`        | `llvm.sdiv`
+`spirv.SRem`        | `llvm.srem`
+`spirv.UDiv`        | `llvm.udiv`
+`spirv.UMod`        | `llvm.urem`
 
 ### Bitwise ops
 
@@ -140,41 +140,41 @@ LLVM:
 
 SPIR-V Dialect op | LLVM Dialect op
 :---------------: | :-------------:
-`spv.BitwiseAnd`  | `llvm.and`
-`spv.BitwiseOr`   | `llvm.or`
-`spv.BitwiseXor`  | `llvm.xor`
+`spirv.BitwiseAnd`  | `llvm.and`
+`spirv.BitwiseOr`   | `llvm.or`
+`spirv.BitwiseXor`  | `llvm.xor`
 
 Also, some of bitwise ops can be modelled with LLVM intrinsics:
 
 SPIR-V Dialect op | LLVM Dialect intrinsic
 :---------------: | :--------------------:
-`spv.BitCount`    | `llvm.intr.ctpop`
-`spv.BitReverse`  | `llvm.intr.bitreverse`
+`spirv.BitCount`    | `llvm.intr.ctpop`
+`spirv.BitReverse`  | `llvm.intr.bitreverse`
 
-#### `spv.Not`
+#### `spirv.Not`
 
-`spv.Not` is modelled with a `xor` operation with a mask with all bits set.
+`spirv.Not` is modelled with a `xor` operation with a mask with all bits set.
 
 ```mlir
                             %mask = llvm.mlir.constant(-1 : i32) : i32
-%0 = spv.Not %op : i32  =>  %0  = llvm.xor %op, %mask : i32
+%0 = spirv.Not %op : i32  =>  %0  = llvm.xor %op, %mask : i32
 ```
 
 #### Bitfield ops
 
-SPIR-V dialect has three bitfield ops: `spv.BitFieldInsert`,
-`spv.BitFieldSExtract` and `spv.BitFieldUExtract`. This section will first
+SPIR-V dialect has three bitfield ops: `spirv.BitFieldInsert`,
+`spirv.BitFieldSExtract` and `spirv.BitFieldUExtract`. This section will first
 outline the general design of conversion patterns for this ops, and then
 describe each of them.
 
 All of these ops take `base`, `offset` and `count` (`insert` for
-`spv.BitFieldInsert`) as arguments. There are two important things to note:
+`spirv.BitFieldInsert`) as arguments. There are two important things to note:
 
 *   `offset` and `count` are always scalar. This means that we can have the
     following case:
 
     ```mlir
-    %0 = spv.BitFieldSExtract %base, %offset, %count : vector<2xi32>, i8, i8
+    %0 = spirv.BitFieldSExtract %base, %offset, %count : vector<2xi32>, i8, i8
     ```
 
     To be able to proceed with conversion algorithms described below, all
@@ -213,7 +213,7 @@ All of these ops take `base`, `offset` and `count` (`insert` for
 Now, having these two cases in mind, we can proceed with conversion for the ops
 and their operands.
 
-##### `spv.BitFieldInsert`
+##### `spirv.BitFieldInsert`
 
 This operation is implemented as a series of LLVM Dialect operations. First step
 would be to create a mask with bits set outside [`offset`, `offset` + `count` -
@@ -234,12 +234,12 @@ would be to create a mask with bits set outside [`offset`, `offset` + `count` -
 // Insert new bits
 // %sh_insert = llvm.shl %insert, %offset : i32
 // %res       = llvm.or %new_base, %sh_insert : i32
-%res = spv.BitFieldInsert %base, %insert, %offset, %count : i32, i32, i32
+%res = spirv.BitFieldInsert %base, %insert, %offset, %count : i32, i32, i32
 ```
 
-##### `spv.BitFieldSExtract`
+##### `spirv.BitFieldSExtract`
 
-To implement `spv.BitFieldSExtract`, `base` is shifted left by [sizeof(`base`) -
+To implement `spirv.BitFieldSExtract`, `base` is shifted left by [sizeof(`base`) -
 (`count` + `offset`)], so that the bit at `offset` + `count` - 1 is the most
 significant bit. After, the result is shifted right, filling the bits with the
 sign bit.
@@ -254,12 +254,12 @@ sign bit.
 // %sh_left = llvm.shl %base, %t1 : i32
 // %t2      = llvm.add %offset, %t1 : i32
 // %res     = llvm.ashr %sh_left, %t2 : i32
-%res = spv.BitFieldSExtract %base, %offset, %count : i32, i32, i32
+%res = spirv.BitFieldSExtract %base, %offset, %count : i32, i32, i32
 ```
 
-##### `spv.BitFieldUExtract`
+##### `spirv.BitFieldUExtract`
 
-For this op a similar pattern as for `spv.BitFieldInsert` is used. First, a mask
+For this op a similar pattern as for `spirv.BitFieldInsert` is used. First, a mask
 with bits set at [0, `count` - 1] is created. Then `base` is shifted by `offset`
 and the mask is applied.
 
@@ -272,7 +272,7 @@ and the mask is applied.
 // Shift Base and apply mask
 // %sh_base   = llvm.lshr %base, %offset : i32
 // %res       = llvm.and %sh_base, %mask : i32
-%res = spv.BitFieldUExtract %base, %offset, %count : i32, i32, i32
+%res = spirv.BitFieldUExtract %base, %offset, %count : i32, i32, i32
 ```
 
 ### Cast ops
@@ -281,12 +281,12 @@ and the mask is applied.
 
 SPIR-V Dialect op | LLVM Dialect op
 :---------------: | :-------------:
-`spv.ConvertFToS` | `llvm.fptosi`
-`spv.ConvertFToU` | `llvm.fptoui`
-`spv.ConvertSToF` | `llvm.sitofp`
-`spv.ConvertUToF` | `llvm.uitofp`
+`spirv.ConvertFToS` | `llvm.fptosi`
+`spirv.ConvertFToU` | `llvm.fptoui`
+`spirv.ConvertSToF` | `llvm.sitofp`
+`spirv.ConvertUToF` | `llvm.uitofp`
 
-#### spv.Bitcast
+#### spirv.Bitcast
 
 This operation has a direct counterpart in LLVM: `llvm.bitcast`. It is treated
 separately since it also supports pointer to pointer bit pattern-preserving type
@@ -294,26 +294,26 @@ conversion, apart from regular scalar or vector of numerical type.
 
 #### Special cases
 
-Special cases include `spv.FConvert`, `spv.SConvert` and `spv.UConvert`. These
+Special cases include `spirv.FConvert`, `spirv.SConvert` and `spirv.UConvert`. These
 operations are either a truncate or extend. Let's denote the operand component
 width as A, and result component width as R. Then, the following mappings are
 used:
 
-##### `spv.FConvert`
+##### `spirv.FConvert`
 
 Case  | LLVM Dialect op
 :---: | :-------------:
 A < R | `llvm.fpext`
 A > R | `llvm.fptrunc`
 
-##### `spv.SConvert`
+##### `spirv.SConvert`
 
 Case  | LLVM Dialect op
 :---: | :-------------:
 A < R | `llvm.sext`
 A > R | `llvm.trunc`
 
-##### `spv.UConvert`
+##### `spirv.UConvert`
 
 Case  | LLVM Dialect op
 :---: | :-------------:
@@ -330,55 +330,55 @@ SPIR-V comparison ops are mapped to LLVM `icmp` and `fcmp` operations.
 
 SPIR-V Dialect op            | LLVM Dialect op
 :--------------------------: | :---------------:
-`spv.IEqual`                 | `llvm.icmp "eq"`
-`spv.INotEqual`              | `llvm.icmp "ne"`
-`spv.FOrdEqual`              | `llvm.fcmp "oeq"`
-`spv.FOrdGreaterThan`        | `llvm.fcmp "ogt"`
-`spv.FOrdGreaterThanEqual`   | `llvm.fcmp "oge"`
-`spv.FOrdLessThan`           | `llvm.fcmp "olt"`
-`spv.FOrdLessThanEqual`      | `llvm.fcmp "ole"`
-`spv.FOrdNotEqual`           | `llvm.fcmp "one"`
-`spv.FUnordEqual`            | `llvm.fcmp "ueq"`
-`spv.FUnordGreaterThan`      | `llvm.fcmp "ugt"`
-`spv.FUnordGreaterThanEqual` | `llvm.fcmp "uge"`
-`spv.FUnordLessThan`         | `llvm.fcmp "ult"`
-`spv.FUnordLessThanEqual`    | `llvm.fcmp "ule"`
-`spv.FUnordNotEqual`         | `llvm.fcmp "une"`
-`spv.SGreaterThan`           | `llvm.icmp "sgt"`
-`spv.SGreaterThanEqual`      | `llvm.icmp "sge"`
-`spv.SLessThan`              | `llvm.icmp "slt"`
-`spv.SLessThanEqual`         | `llvm.icmp "sle"`
-`spv.UGreaterThan`           | `llvm.icmp "ugt"`
-`spv.UGreaterThanEqual`      | `llvm.icmp "uge"`
-`spv.ULessThan`              | `llvm.icmp "ult"`
-`spv.ULessThanEqual`         | `llvm.icmp "ule"`
+`spirv.IEqual`                 | `llvm.icmp "eq"`
+`spirv.INotEqual`              | `llvm.icmp "ne"`
+`spirv.FOrdEqual`              | `llvm.fcmp "oeq"`
+`spirv.FOrdGreaterThan`        | `llvm.fcmp "ogt"`
+`spirv.FOrdGreaterThanEqual`   | `llvm.fcmp "oge"`
+`spirv.FOrdLessThan`           | `llvm.fcmp "olt"`
+`spirv.FOrdLessThanEqual`      | `llvm.fcmp "ole"`
+`spirv.FOrdNotEqual`           | `llvm.fcmp "one"`
+`spirv.FUnordEqual`            | `llvm.fcmp "ueq"`
+`spirv.FUnordGreaterThan`      | `llvm.fcmp "ugt"`
+`spirv.FUnordGreaterThanEqual` | `llvm.fcmp "uge"`
+`spirv.FUnordLessThan`         | `llvm.fcmp "ult"`
+`spirv.FUnordLessThanEqual`    | `llvm.fcmp "ule"`
+`spirv.FUnordNotEqual`         | `llvm.fcmp "une"`
+`spirv.SGreaterThan`           | `llvm.icmp "sgt"`
+`spirv.SGreaterThanEqual`      | `llvm.icmp "sge"`
+`spirv.SLessThan`              | `llvm.icmp "slt"`
+`spirv.SLessThanEqual`         | `llvm.icmp "sle"`
+`spirv.UGreaterThan`           | `llvm.icmp "ugt"`
+`spirv.UGreaterThanEqual`      | `llvm.icmp "uge"`
+`spirv.ULessThan`              | `llvm.icmp "ult"`
+`spirv.ULessThanEqual`         | `llvm.icmp "ule"`
 
 ### Composite ops
 
-Currently, conversion supports rewrite patterns for `spv.CompositeExtract` and
-`spv.CompositeInsert`. We distinguish two cases for these operations: when the
+Currently, conversion supports rewrite patterns for `spirv.CompositeExtract` and
+`spirv.CompositeInsert`. We distinguish two cases for these operations: when the
 composite object is a vector, and when the composite object is of a non-vector
 type (*i.e.* struct, array or runtime array).
 
 Composite type | SPIR-V Dialect op      | LLVM Dialect op
 :------------: | :--------------------: | :-------------------:
-vector         | `spv.CompositeExtract` | `llvm.extractelement`
-vector         | `spv.CompositeInsert`  | `llvm.insertelement`
-non-vector     | `spv.CompositeExtract` | `llvm.extractvalue`
-non-vector     | `spv.CompositeInsert`  | `llvm.insertvalue`
+vector         | `spirv.CompositeExtract` | `llvm.extractelement`
+vector         | `spirv.CompositeInsert`  | `llvm.insertelement`
+non-vector     | `spirv.CompositeExtract` | `llvm.extractvalue`
+non-vector     | `spirv.CompositeInsert`  | `llvm.insertvalue`
 
-### `spv.EntryPoint` and `spv.ExecutionMode`
+### `spirv.EntryPoint` and `spirv.ExecutionMode`
 
 First of all, it is important to note that there is no direct representation of
 entry points in LLVM. At the moment, we use the following approach:
 
-*   `spv.EntryPoint` is simply removed.
+*   `spirv.EntryPoint` is simply removed.
 
-*   In contrast, `spv.ExecutionMode` may contain important information about the
+*   In contrast, `spirv.ExecutionMode` may contain important information about the
     entry point. For example, `LocalSize` provides information about the
     work-group size that can be reused.
 
-    In order to preserve this information, `spv.ExecutionMode` is converted to a
+    In order to preserve this information, `spirv.ExecutionMode` is converted to a
     struct global variable that stores the execution mode id and any variables
     associated with it. In C, the struct has the structure shown below.
 
@@ -392,7 +392,7 @@ entry points in LLVM. At the moment, we use the following approach:
     ```
 
     ```mlir
-    // spv.ExecutionMode @empty "ContractionOff"
+    // spirv.ExecutionMode @empty "ContractionOff"
     llvm.mlir.global external constant @{{.*}}() : !llvm.struct<(i32)> {
       %0   = llvm.mlir.undef : !llvm.struct<(i32)>
       %1   = llvm.mlir.constant(31 : i32) : i32
@@ -409,17 +409,17 @@ emulate SPIR-V ops behaviour:
 
 SPIR-V Dialect op     | LLVM Dialect op
 :-------------------: | :--------------:
-`spv.LogicalAnd`      | `llvm.and`
-`spv.LogicalOr`       | `llvm.or`
-`spv.LogicalEqual`    | `llvm.icmp "eq"`
-`spv.LogicalNotEqual` | `llvm.icmp "ne"`
+`spirv.LogicalAnd`      | `llvm.and`
+`spirv.LogicalOr`       | `llvm.or`
+`spirv.LogicalEqual`    | `llvm.icmp "eq"`
+`spirv.LogicalNotEqual` | `llvm.icmp "ne"`
 
-`spv.LogicalNot` has the same conversion pattern as bitwise `spv.Not`. It is
+`spirv.LogicalNot` has the same conversion pattern as bitwise `spirv.Not`. It is
 modelled with `xor` operation with a mask with all bits set.
 
 ```mlir
                                   %mask = llvm.mlir.constant(-1 : i1) : i1
-%0 = spv.LogicalNot %op : i1  =>  %0    = llvm.xor %op, %mask : i1
+%0 = spirv.LogicalNot %op : i1  =>  %0    = llvm.xor %op, %mask : i1
 ```
 
 ### Memory ops
@@ -427,17 +427,17 @@ modelled with `xor` operation with a mask with all bits set.
 This section describes the conversion patterns for SPIR-V dialect operations
 that concern memory.
 
-#### `spv.AccessChain`
+#### `spirv.AccessChain`
 
-`spv.AccessChain` is mapped to `llvm.getelementptr` op. In order to create a
-valid LLVM op, we also add a 0 index to the `spv.AccessChain`'s indices list in
+`spirv.AccessChain` is mapped to `llvm.getelementptr` op. In order to create a
+valid LLVM op, we also add a 0 index to the `spirv.AccessChain`'s indices list in
 order to go through the pointer.
 
 ```mlir
 // Access the 1st element of the array
-%i   = spv.Constant 1: i32
-%var = spv.Variable : !spv.ptr<!spv.struct<f32, !spv.array<4xf32>>, Function>
-%el  = spv.AccessChain %var[%i, %i] : !spv.ptr<!spv.struct<f32, !spv.array<4xf32>>, Function>, i32, i32
+%i   = spirv.Constant 1: i32
+%var = spirv.Variable : !spirv.ptr<!spirv.struct<f32, !spirv.array<4xf32>>, Function>
+%el  = spirv.AccessChain %var[%i, %i] : !spirv.ptr<!spirv.struct<f32, !spirv.array<4xf32>>, Function>, i32, i32
 
 // Corresponding LLVM dialect code
 %i   = ...
@@ -446,44 +446,44 @@ order to go through the pointer.
 %el  = llvm.getelementptr %var[%0, %i, %i] : (!llvm.ptr<struct<packed (f32, array<4 x f32>)>>, i32, i32, i32)
 ```
 
-#### `spv.Load` and `spv.Store`
+#### `spirv.Load` and `spirv.Store`
 
 These ops are converted to their LLVM counterparts: `llvm.load` and
 `llvm.store`. If the op has a memory access attribute, then there are the
 following cases, based on the value of the attribute:
 
 *   **Aligned**: alignment is passed on to LLVM op builder, for example: `mlir
-    // llvm.store %ptr, %val {alignment = 4 : i64} : !llvm.ptr<f32> spv.Store
+    // llvm.store %ptr, %val {alignment = 4 : i64} : !llvm.ptr<f32> spirv.Store
     "Function" %ptr, %val ["Aligned", 4] : f32`
 *   **None**: same case as if there is no memory access attribute.
 
 *   **Nontemporal**: set `nontemporal` flag, for example: `mlir // %res =
-    llvm.load %ptr {nontemporal} : !llvm.ptr<f32> %res = spv.Load "Function"
+    llvm.load %ptr {nontemporal} : !llvm.ptr<f32> %res = spirv.Load "Function"
     %ptr ["Nontemporal"] : f32`
 
 *   **Volatile**: mark the op as `volatile`, for example: `mlir // %res =
-    llvm.load volatile %ptr : !llvm.ptr<f32> %res = spv.Load "Function" %ptr
+    llvm.load volatile %ptr : !llvm.ptr<f32> %res = spirv.Load "Function" %ptr
     ["Volatile"] : f32` Otherwise the conversion fails as other cases
     (`MakePointerAvailable`, `MakePointerVisible`, `NonPrivatePointer`) are not
     supported yet.
 
-#### `spv.GlobalVariable` and `spv.mlir.addressof`
+#### `spirv.GlobalVariable` and `spirv.mlir.addressof`
 
-`spv.GlobalVariable` is modelled with `llvm.mlir.global` op. However, there is a
+`spirv.GlobalVariable` is modelled with `llvm.mlir.global` op. However, there is a
 
difference that has to be pointed out.
 
 In SPIR-V dialect, the global variable returns a pointer, whereas in LLVM
 dialect the global holds an actual value. This 
difference is handled by
-`spv.mlir.addressof` and `llvm.mlir.addressof` ops that both return a pointer
+`spirv.mlir.addressof` and `llvm.mlir.addressof` ops that both return a pointer
 and are used to reference the global.
 
 ```mlir
 // Original SPIR-V module
-spv.module Logical GLSL450 {
-  spv.GlobalVariable @struct : !spv.ptr<!spv.struct<f32, !spv.array<10xf32>>, Private>
-  spv.func @func() -> () "None" {
-    %0 = spv.mlir.addressof @struct : !spv.ptr<!spv.struct<f32, !spv.array<10xf32>>, Private>
-    spv.Return
+spirv.module Logical GLSL450 {
+  spirv.GlobalVariable @struct : !spirv.ptr<!spirv.struct<f32, !spirv.array<10xf32>>, Private>
+  spirv.func @func() -> () "None" {
+    %0 = spirv.mlir.addressof @struct : !spirv.ptr<!spirv.struct<f32, !spirv.array<10xf32>>, Private>
+    spirv.Return
   }
 }
 
@@ -518,29 +518,29 @@ If the global variable's pointer has `Input` storage class, then a `constant`
 flag is added to LLVM op:
 
 ```mlir
-spv.GlobalVariable @var : !spv.ptr<f32, Input>    =>    llvm.mlir.global external constant @var() : f32
+spirv.GlobalVariable @var : !spirv.ptr<f32, Input>    =>    llvm.mlir.global external constant @var() : f32
 ```
 
-#### `spv.Variable`
+#### `spirv.Variable`
 
-Per SPIR-V dialect spec, `spv.Variable` allocates an object in memory, resulting
-in a pointer to it, which can be used with `spv.Load` and `spv.Store`. It is
+Per SPIR-V dialect spec, `spirv.Variable` allocates an object in memory, resulting
+in a pointer to it, which can be used with `spirv.Load` and `spirv.Store`. It is
 also a function-level variable.
 
-`spv.Variable` is modelled as `llvm.alloca` op. If initialized, an additional
+`spirv.Variable` is modelled as `llvm.alloca` op. If initialized, an additional
 store instruction is used. Note that there is no initialization for arrays and
 structs since constants of these types are not supported in LLVM dialect (TODO).
-Also, at the moment initialization is only possible via `spv.Constant`.
+Also, at the moment initialization is only possible via `spirv.Constant`.
 
 ```mlir
 // Conversion of VariableOp without initialization
                                                                %size = llvm.mlir.constant(1 : i32) : i32
-%res = spv.Variable : !spv.ptr<vector<3xf32>, Function>   =>   %res  = llvm.alloca  %size x vector<3xf32> : (i32) -> !llvm.ptr<vec<3 x f32>>
+%res = spirv.Variable : !spirv.ptr<vector<3xf32>, Function>   =>   %res  = llvm.alloca  %size x vector<3xf32> : (i32) -> !llvm.ptr<vec<3 x f32>>
 
 // Conversion of VariableOp with initialization
                                                                %c    = llvm.mlir.constant(0 : i64) : i64
-%c   = spv.Constant 0 : i64                                    %size = llvm.mlir.constant(1 : i32) : i32
-%res = spv.Variable init(%c) : !spv.ptr<i64, Function>    =>   %res  = llvm.alloca %[[SIZE]] x i64 : (i32) -> !llvm.ptr<i64>
+%c   = spirv.Constant 0 : i64                                    %size = llvm.mlir.constant(1 : i32) : i32
+%res = spirv.Variable init(%c) : !spirv.ptr<i64, Function>    =>   %res  = llvm.alloca %[[SIZE]] x i64 : (i32) -> !llvm.ptr<i64>
                                                                llvm.store %c, %res : !llvm.ptr<i64>
 ```
 
@@ -557,8 +557,8 @@ section.
 
 SPIR-V Dialect op | LLVM Dialect op
 :---------------: | :---------------:
-`spv.Select`      | `llvm.select`
-`spv.Undef`       | `llvm.mlir.undef`
+`spirv.Select`      | `llvm.select`
+`spirv.Undef`       | `llvm.mlir.undef`
 
 ### Shift ops
 
@@ -578,21 +578,21 @@ bitwidth. This leads to the following conversions:
 
 ```mlir
 // Shift without extension
-%res0 = spv.ShiftRightArithmetic %0, %2 : i32, i32  =>  %res0 = llvm.ashr %0, %2 : i32
+%res0 = spirv.ShiftRightArithmetic %0, %2 : i32, i32  =>  %res0 = llvm.ashr %0, %2 : i32
 
 // Shift with extension
                                                         %ext  = llvm.sext %1 : i16 to i32
-%res1 = spv.ShiftRightArithmetic %0, %1 : i32, i16  =>  %res1 = llvm.ashr %0, %ext: i32
+%res1 = spirv.ShiftRightArithmetic %0, %1 : i32, i16  =>  %res1 = llvm.ashr %0, %ext: i32
 ```
 
-### `spv.Constant`
+### `spirv.Constant`
 
-At the moment `spv.Constant` conversion supports scalar and vector constants
+At the moment `spirv.Constant` conversion supports scalar and vector constants
 **only**.
 
 #### Mapping
 
-`spv.Constant` is mapped to `llvm.mlir.constant`. This is a straightforward
+`spirv.Constant` is mapped to `llvm.mlir.constant`. This is a straightforward
 conversion pattern with a special case when the argument is signed or unsigned.
 
 #### Special case
@@ -609,10 +609,10 @@ cover all possible corner cases.
 
 ```mlir
 // %0 = llvm.mlir.constant(0 : i8) : i8
-%0 = spv.Constant  0 : i8
+%0 = spirv.Constant  0 : i8
 
 // %1 = llvm.mlir.constant(dense<[2, 3, 4]> : vector<3xi32>) : vector<3xi32>
-%1 = spv.Constant dense<[2, 3, 4]> : vector<3xui32>
+%1 = spirv.Constant dense<[2, 3, 4]> : vector<3xui32>
 ```
 
 ### Not implemented ops
@@ -626,67 +626,67 @@ There is no support of the following ops:
 
 As well as:
 
-*   spv.CompositeConstruct
-*   spv.ControlBarrier
-*   spv.CopyMemory
-*   spv.FMod
-*   spv.GL.Acos
-*   spv.GL.Asin
-*   spv.GL.Atan
-*   spv.GL.Cosh
-*   spv.GL.FSign
-*   spv.GL.SAbs
-*   spv.GL.Sinh
-*   spv.GL.SSign
-*   spv.MemoryBarrier
-*   spv.mlir.referenceof
-*   spv.SMod
-*   spv.SpecConstant
-*   spv.Unreachable
-*   spv.VectorExtractDynamic
+*   spirv.CompositeConstruct
+*   spirv.ControlBarrier
+*   spirv.CopyMemory
+*   spirv.FMod
+*   spirv.GL.Acos
+*   spirv.GL.Asin
+*   spirv.GL.Atan
+*   spirv.GL.Cosh
+*   spirv.GL.FSign
+*   spirv.GL.SAbs
+*   spirv.GL.Sinh
+*   spirv.GL.SSign
+*   spirv.MemoryBarrier
+*   spirv.mlir.referenceof
+*   spirv.SMod
+*   spirv.SpecConstant
+*   spirv.Unreachable
+*   spirv.VectorExtractDynamic
 
 ## Control flow conversion
 
 ### Branch ops
 
-`spv.Branch` and `spv.BranchConditional` are mapped to `llvm.br` and
-`llvm.cond_br`. Branch weights for `spv.BranchConditional` are mapped to
+`spirv.Branch` and `spirv.BranchConditional` are mapped to `llvm.br` and
+`llvm.cond_br`. Branch weights for `spirv.BranchConditional` are mapped to
 corresponding `branch_weights` attribute of `llvm.cond_br`. When translated to
 proper LLVM, `branch_weights` are converted into LLVM metadata associated with
 the conditional branch.
 
-### `spv.FunctionCall`
+### `spirv.FunctionCall`
 
-`spv.FunctionCall` maps to `llvm.call`. For example:
+`spirv.FunctionCall` maps to `llvm.call`. For example:
 
 ```mlir
-%0 = spv.FunctionCall @foo() : () -> i32    =>    %0 = llvm.call @foo() : () -> f32
-spv.FunctionCall @bar(%0) : (i32) -> ()     =>    llvm.call @bar(%0) : (f32) -> ()
+%0 = spirv.FunctionCall @foo() : () -> i32    =>    %0 = llvm.call @foo() : () -> f32
+spirv.FunctionCall @bar(%0) : (i32) -> ()     =>    llvm.call @bar(%0) : (f32) -> ()
 ```
 
-### `spv.mlir.selection` and `spv.mlir.loop`
+### `spirv.mlir.selection` and `spirv.mlir.loop`
 
-Control flow within `spv.mlir.selection` and `spv.mlir.loop` is lowered directly
+Control flow within `spirv.mlir.selection` and `spirv.mlir.loop` is lowered directly
 to LLVM via branch ops. The conversion can only be applied to selection or loop
 with all blocks being reachable. Moreover, selection and loop control attributes
 (such as `Flatten` or `Unroll`) are not supported at the moment.
 
 ```mlir
 // Conversion of selection
-%cond = spv.Constant true                               %cond = llvm.mlir.constant(true) : i1
-spv.mlir.selection {
-  spv.BranchConditional %cond, ^true, ^false            llvm.cond_br %cond, ^true, ^false
+%cond = spirv.Constant true                               %cond = llvm.mlir.constant(true) : i1
+spirv.mlir.selection {
+  spirv.BranchConditional %cond, ^true, ^false            llvm.cond_br %cond, ^true, ^false
 
 ^true:                                                                                              ^true:
   // True block code                                    // True block code
-  spv.Branch ^merge                             =>      llvm.br ^merge
+  spirv.Branch ^merge                             =>      llvm.br ^merge
 
 ^false:                                               ^false:
   // False block code                                   // False block code
-  spv.Branch ^merge                                     llvm.br ^merge
+  spirv.Branch ^merge                                     llvm.br ^merge
 
 ^merge:                                               ^merge:
-  spv.mlir.merge                                            llvm.br ^continue
+  spirv.mlir.merge                                            llvm.br ^continue
 }
 // Remaining code                                                                           ^continue:
                                                         // Remaining code
@@ -694,24 +694,24 @@ spv.mlir.selection {
 
 ```mlir
 // Conversion of loop
-%cond = spv.Constant true                               %cond = llvm.mlir.constant(true) : i1
-spv.mlir.loop {
-  spv.Branch ^header                                    llvm.br ^header
+%cond = spirv.Constant true                               %cond = llvm.mlir.constant(true) : i1
+spirv.mlir.loop {
+  spirv.Branch ^header                                    llvm.br ^header
 
 ^header:                                              ^header:
   // Header code                                        // Header code
-  spv.BranchConditional %cond, ^body, ^merge    =>      llvm.cond_br %cond, ^body, ^merge
+  spirv.BranchConditional %cond, ^body, ^merge    =>      llvm.cond_br %cond, ^body, ^merge
 
 ^body:                                                ^body:
   // Body code                                          // Body code
-  spv.Branch ^continue                                  llvm.br ^continue
+  spirv.Branch ^continue                                  llvm.br ^continue
 
 ^continue:                                            ^continue:
   // Continue code                                      // Continue code
-  spv.Branch ^header                                    llvm.br ^header
+  spirv.Branch ^header                                    llvm.br ^header
 
 ^merge:                                               ^merge:
-  spv.mlir.merge                                            llvm.br ^remaining
+  spirv.mlir.merge                                            llvm.br ^remaining
 }
 // Remaining code                                     ^remaining:
                                                         // Remaining code
@@ -730,45 +730,45 @@ mapped to LLVM Dialect.
 
 SPIR-V Dialect op | LLVM Dialect op
 :---------------: | :----------------:
-`spv.GL.Ceil`     | `llvm.intr.ceil`
-`spv.GL.Cos`      | `llvm.intr.cos`
-`spv.GL.Exp`      | `llvm.intr.exp`
-`spv.GL.FAbs`     | `llvm.intr.fabs`
-`spv.GL.Floor`    | `llvm.intr.floor`
-`spv.GL.FMax`     | `llvm.intr.maxnum`
-`spv.GL.FMin`     | `llvm.intr.minnum`
-`spv.GL.Log`      | `llvm.intr.log`
-`spv.GL.Sin`      | `llvm.intr.sin`
-`spv.GL.Sqrt`     | `llvm.intr.sqrt`
-`spv.GL.SMax`     | `llvm.intr.smax`
-`spv.GL.SMin`     | `llvm.intr.smin`
+`spirv.GL.Ceil`     | `llvm.intr.ceil`
+`spirv.GL.Cos`      | `llvm.intr.cos`
+`spirv.GL.Exp`      | `llvm.intr.exp`
+`spirv.GL.FAbs`     | `llvm.intr.fabs`
+`spirv.GL.Floor`    | `llvm.intr.floor`
+`spirv.GL.FMax`     | `llvm.intr.maxnum`
+`spirv.GL.FMin`     | `llvm.intr.minnum`
+`spirv.GL.Log`      | `llvm.intr.log`
+`spirv.GL.Sin`      | `llvm.intr.sin`
+`spirv.GL.Sqrt`     | `llvm.intr.sqrt`
+`spirv.GL.SMax`     | `llvm.intr.smax`
+`spirv.GL.SMin`     | `llvm.intr.smin`
 
 ### Special cases
 
-`spv.InverseSqrt` is mapped to:
+`spirv.InverseSqrt` is mapped to:
 
 ```mlir
                                            %one  = llvm.mlir.constant(1.0 : f32) : f32
-%res = spv.InverseSqrt %arg : f32    =>    %sqrt = "llvm.intr.sqrt"(%arg) : (f32) -> f32
+%res = spirv.InverseSqrt %arg : f32    =>    %sqrt = "llvm.intr.sqrt"(%arg) : (f32) -> f32
                                            %res  = fdiv %one, %sqrt : f32
 ```
 
-`spv.Tan` is mapped to:
+`spirv.Tan` is mapped to:
 
 ```mlir
                                    %sin = "llvm.intr.sin"(%arg) : (f32) -> f32
-%res = spv.Tan %arg : f32    =>    %cos = "llvm.intr.cos"(%arg) : (f32) -> f32
+%res = spirv.Tan %arg : f32    =>    %cos = "llvm.intr.cos"(%arg) : (f32) -> f32
                                    %res = fdiv %sin, %cos : f32
 ```
 
-`spv.Tanh` is modelled using the equality `tanh(x) = {exp(2x) - 1}/{exp(2x) +
+`spirv.Tanh` is modelled using the equality `tanh(x) = {exp(2x) - 1}/{exp(2x) +
 1}`:
 
 ```mlir
                                      %two   = llvm.mlir.constant(2.0: f32) : f32
                                      %2xArg = llvm.fmul %two, %arg : f32
                                      %exp   = "llvm.intr.exp"(%2xArg) : (f32) -> f32
-%res = spv.Tanh %arg : f32     =>    %one   = llvm.mlir.constant(1.0 : f32) : f32
+%res = spirv.Tanh %arg : f32     =>    %one   = llvm.mlir.constant(1.0 : f32) : f32
                                      %num   = llvm.fsub %exp, %one : f32
                                      %den   = llvm.fadd %exp, %one : f32
                                      %res   = llvm.fdiv %num, %den : f32
@@ -779,7 +779,7 @@ SPIR-V Dialect op | LLVM Dialect op
 This section describes the conversion of function-related operations from SPIR-V
 to LLVM dialect.
 
-### `spv.func`
+### `spirv.func`
 
 This op declares or defines a SPIR-V function and it is converted to
 `llvm.func`. This conversion handles signature conversion, and function control
@@ -798,7 +798,7 @@ DontInline                         | `noinline`
 Pure                               | `readonly`
 Const                              | `readnone`
 
-### `spv.Return` and `spv.ReturnValue`
+### `spirv.Return` and `spirv.ReturnValue`
 
 In LLVM IR, functions may return either 1 or 0 value. Hence, we map both ops to
 `llvm.return` with or without a return value.
@@ -806,13 +806,13 @@ In LLVM IR, functions may return either 1 or 0 value. Hence, we map both ops to
 ## Module ops
 
 Module in SPIR-V has one region that contains one block. It is defined via
-`spv.module` op that also takes a range of attributes:
+`spirv.module` op that also takes a range of attributes:
 
 *   Addressing model
 *   Memory model
 *   Version-Capability-Extension attribute
 
-`spv.module` is converted into `ModuleOp`. This plays a role of enclosing scope
+`spirv.module` is converted into `ModuleOp`. This plays a role of enclosing scope
 to LLVM ops. At the moment, SPIR-V module attributes are ignored.
 
 ## `mlir-spirv-cpu-runner`
@@ -873,12 +873,12 @@ func.func @main() {
 Lowering `gpu` dialect to SPIR-V dialect results in
 
 ```mlir
-spv.module @__spv__foo /*VCE triple and other metadata here*/ {
-  spv.GlobalVariable @__spv__foo_arg bind(0,0) : ...
-  spv.func @bar() {
+spirv.module @__spv__foo /*VCE triple and other metadata here*/ {
+  spirv.GlobalVariable @__spv__foo_arg bind(0,0) : ...
+  spirv.func @bar() {
     // Kernel code.
   }
-  spv.EntryPoint @bar, ...
+  spirv.EntryPoint @bar, ...
 }
 
 func.func @main() {
@@ -897,12 +897,12 @@ Then, the lowering from standard dialect to LLVM dialect is applied to the host
 code.
 
 ```mlir
-spv.module @__spv__foo /*VCE triple and other metadata here*/ {
-  spv.GlobalVariable @__spv__foo_arg bind(0,0) : ...
-  spv.func @bar() {
+spirv.module @__spv__foo /*VCE triple and other metadata here*/ {
+  spirv.GlobalVariable @__spv__foo_arg bind(0,0) : ...
+  spirv.func @bar() {
     // Kernel code.
   }
-  spv.EntryPoint @bar, ...
+  spirv.EntryPoint @bar, ...
 }
 
 // Kernel function declaration.

diff  --git a/mlir/include/mlir/Conversion/GPUToSPIRV/GPUToSPIRV.h b/mlir/include/mlir/Conversion/GPUToSPIRV/GPUToSPIRV.h
index add196d441ea..132866f756f9 100644
--- a/mlir/include/mlir/Conversion/GPUToSPIRV/GPUToSPIRV.h
+++ b/mlir/include/mlir/Conversion/GPUToSPIRV/GPUToSPIRV.h
@@ -20,7 +20,7 @@ class SPIRVTypeConverter;
 
 /// Appends to a pattern list additional patterns for translating GPU Ops to
 /// SPIR-V ops. For a gpu.func to be converted, it should have a
-/// spv.entry_point_abi attribute.
+/// spirv.entry_point_abi attribute.
 void populateGPUToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
                                 RewritePatternSet &patterns);
 } // namespace mlir

diff  --git a/mlir/include/mlir/Conversion/GPUToSPIRV/GPUToSPIRVPass.h b/mlir/include/mlir/Conversion/GPUToSPIRV/GPUToSPIRVPass.h
index 809cbabe2071..c091a71630f1 100644
--- a/mlir/include/mlir/Conversion/GPUToSPIRV/GPUToSPIRVPass.h
+++ b/mlir/include/mlir/Conversion/GPUToSPIRV/GPUToSPIRVPass.h
@@ -25,7 +25,7 @@ class OperationPass;
 #include "mlir/Conversion/Passes.h.inc"
 
 /// Creates a pass to convert GPU kernel ops to corresponding SPIR-V ops. For a
-/// gpu.func to be converted, it should have a spv.entry_point_abi attribute.
+/// gpu.func to be converted, it should have a spirv.entry_point_abi attribute.
 /// If `mapMemorySpace` is true, performs MemRef memory space to SPIR-V mapping
 /// according to default Vulkan rules first.
 std::unique_ptr<OperationPass<ModuleOp>>

diff  --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
index 5f4848164793..a5a655bfa035 100644
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -408,7 +408,7 @@ def ConvertGPUToSPIRV : Pass<"convert-gpu-to-spirv", "ModuleOp"> {
     resources. By default, parameters to a `gpu.func` op will be converted to
     global variables. These global variables will be assigned sequential binding
     numbers following their order in the original `gpu.func` op, starting from
-    0, in set 0. One can attach `spv.interface_var_abi` to those parameters
+    0, in set 0. One can attach `spirv.interface_var_abi` to those parameters
     to control the set and binding if wanted.
   }];
   let constructor = "mlir::createConvertGPUToSPIRVPass()";

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/SPIRV/IR/CMakeLists.txt
index 8090be83f886..de3148d0a42d 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/CMakeLists.txt
@@ -1,4 +1,4 @@
-add_mlir_dialect(SPIRVOps spv)
+add_mlir_dialect(SPIRVOps spirv)
 add_mlir_doc(SPIRVOps SPIRVOps Dialects/ -gen-op-doc)
 
 set(LLVM_TARGET_DEFINITIONS SPIRVBase.td)

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVArithmeticOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVArithmeticOps.td
index beed00e077b3..cdb4c0a6cb8e 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVArithmeticOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVArithmeticOps.td
@@ -61,14 +61,14 @@ def SPV_FAddOp : SPV_ArithmeticBinaryOp<"FAdd", SPV_Float, [Commutative]> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    fadd-op ::= ssa-id `=` `spv.FAdd` ssa-use, ssa-use
+    fadd-op ::= ssa-id `=` `spirv.FAdd` ssa-use, ssa-use
                           `:` float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %4 = spv.FAdd %0, %1 : f32
-    %5 = spv.FAdd %2, %3 : vector<4xf32>
+    %4 = spirv.FAdd %0, %1 : f32
+    %5 = spirv.FAdd %2, %3 : vector<4xf32>
     ```
   }];
 }
@@ -91,15 +91,15 @@ def SPV_FDivOp : SPV_ArithmeticBinaryOp<"FDiv", SPV_Float, []> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    fdiv-op ::= ssa-id `=` `spv.FDiv` ssa-use, ssa-use
+    fdiv-op ::= ssa-id `=` `spirv.FDiv` ssa-use, ssa-use
                           `:` float-scalar-vector-type
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.FDiv %0, %1 : f32
-    %5 = spv.FDiv %2, %3 : vector<4xf32>
+    %4 = spirv.FDiv %0, %1 : f32
+    %5 = spirv.FDiv %2, %3 : vector<4xf32>
     ```
   }];
 }
@@ -126,14 +126,14 @@ def SPV_FModOp : SPV_ArithmeticBinaryOp<"FMod", SPV_Float, []> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    fmod-op ::= ssa-id `=` `spv.FMod` ssa-use, ssa-use
+    fmod-op ::= ssa-id `=` `spirv.FMod` ssa-use, ssa-use
                           `:` float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %4 = spv.FMod %0, %1 : f32
-    %5 = spv.FMod %2, %3 : vector<4xf32>
+    %4 = spirv.FMod %0, %1 : f32
+    %5 = spirv.FMod %2, %3 : vector<4xf32>
     ```
   }];
 }
@@ -156,15 +156,15 @@ def SPV_FMulOp : SPV_ArithmeticBinaryOp<"FMul", SPV_Float, [Commutative]> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    fmul-op ::= `spv.FMul` ssa-use, ssa-use
+    fmul-op ::= `spirv.FMul` ssa-use, ssa-use
                           `:` float-scalar-vector-type
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.FMul %0, %1 : f32
-    %5 = spv.FMul %2, %3 : vector<4xf32>
+    %4 = spirv.FMul %0, %1 : f32
+    %5 = spirv.FMul %2, %3 : vector<4xf32>
     ```
   }];
 }
@@ -191,14 +191,14 @@ def SPV_FNegateOp : SPV_ArithmeticUnaryOp<"FNegate", SPV_Float, []> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    fmul-op ::= `spv.FNegate` ssa-use `:` float-scalar-vector-type
+    fmul-op ::= `spirv.FNegate` ssa-use `:` float-scalar-vector-type
     ```
 
     #### Example:
 
     ```mlir
-    %1 = spv.FNegate %0 : f32
-    %3 = spv.FNegate %2 : vector<4xf32>
+    %1 = spirv.FNegate %0 : f32
+    %3 = spirv.FNegate %2 : vector<4xf32>
     ```
   }];
 }
@@ -225,15 +225,15 @@ def SPV_FRemOp : SPV_ArithmeticBinaryOp<"FRem", SPV_Float, []> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    frem-op ::= ssa-id `=` `spv.FRemOp` ssa-use, ssa-use
+    frem-op ::= ssa-id `=` `spirv.FRemOp` ssa-use, ssa-use
                           `:` float-scalar-vector-type
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.FRemOp %0, %1 : f32
-    %5 = spv.FRemOp %2, %3 : vector<4xf32>
+    %4 = spirv.FRemOp %0, %1 : f32
+    %5 = spirv.FRemOp %2, %3 : vector<4xf32>
     ```
   }];
 }
@@ -255,15 +255,15 @@ def SPV_FSubOp : SPV_ArithmeticBinaryOp<"FSub", SPV_Float, []> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    fsub-op ::= ssa-id `=` `spv.FRemOp` ssa-use, ssa-use
+    fsub-op ::= ssa-id `=` `spirv.FRemOp` ssa-use, ssa-use
                           `:` float-scalar-vector-type
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.FRemOp %0, %1 : f32
-    %5 = spv.FRemOp %2, %3 : vector<4xf32>
+    %4 = spirv.FRemOp %0, %1 : f32
+    %5 = spirv.FRemOp %2, %3 : vector<4xf32>
     ```
   }];
 }
@@ -292,15 +292,15 @@ def SPV_IAddOp : SPV_ArithmeticBinaryOp<"IAdd",
     ```
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    iadd-op ::= ssa-id `=` `spv.IAdd` ssa-use, ssa-use
+    iadd-op ::= ssa-id `=` `spirv.IAdd` ssa-use, ssa-use
                           `:` integer-scalar-vector-type
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.IAdd %0, %1 : i32
-    %5 = spv.IAdd %2, %3 : vector<4xi32>
+    %4 = spirv.IAdd %0, %1 : i32
+    %5 = spirv.IAdd %2, %3 : vector<4xi32>
 
     ```
   }];
@@ -340,8 +340,8 @@ def SPV_IAddCarryOp : SPV_BinaryOp<"IAddCarry",
     #### Example:
 
     ```mlir
-    %2 = spv.IAddCarry %0, %1 : !spv.struct<(i32, i32)>
-    %2 = spv.IAddCarry %0, %1 : !spv.struct<(vector<2xi32>, vector<2xi32>)>
+    %2 = spirv.IAddCarry %0, %1 : !spirv.struct<(i32, i32)>
+    %2 = spirv.IAddCarry %0, %1 : !spirv.struct<(vector<2xi32>, vector<2xi32>)>
     ```
   }];
 
@@ -389,15 +389,15 @@ def SPV_IMulOp : SPV_ArithmeticBinaryOp<"IMul",
     ```
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    imul-op ::= ssa-id `=` `spv.IMul` ssa-use, ssa-use
+    imul-op ::= ssa-id `=` `spirv.IMul` ssa-use, ssa-use
                           `:` integer-scalar-vector-type
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.IMul %0, %1 : i32
-    %5 = spv.IMul %2, %3 : vector<4xi32>
+    %4 = spirv.IMul %0, %1 : i32
+    %5 = spirv.IMul %2, %3 : vector<4xi32>
 
     ```
   }];
@@ -429,15 +429,15 @@ def SPV_ISubOp : SPV_ArithmeticBinaryOp<"ISub",
     ```
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    isub-op ::= `spv.ISub` ssa-use, ssa-use
+    isub-op ::= `spirv.ISub` ssa-use, ssa-use
                           `:` integer-scalar-vector-type
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.ISub %0, %1 : i32
-    %5 = spv.ISub %2, %3 : vector<4xi32>
+    %4 = spirv.ISub %0, %1 : i32
+    %5 = spirv.ISub %2, %3 : vector<4xi32>
 
     ```
   }];
@@ -479,8 +479,8 @@ def SPV_ISubBorrowOp : SPV_BinaryOp<"ISubBorrow", SPV_AnyStruct, SPV_Integer,
     #### Example:
 
     ```mlir
-    %2 = spv.ISubBorrow %0, %1 : !spv.struct<(i32, i32)>
-    %2 = spv.ISubBorrow %0, %1 : !spv.struct<(vector<2xi32>, vector<2xi32>)>
+    %2 = spirv.ISubBorrow %0, %1 : !spirv.struct<(i32, i32)>
+    %2 = spirv.ISubBorrow %0, %1 : !spirv.struct<(vector<2xi32>, vector<2xi32>)>
     ```
   }];
 
@@ -525,15 +525,15 @@ def SPV_SDivOp : SPV_ArithmeticBinaryOp<"SDiv",
     ```
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    sdiv-op ::= ssa-id `=` `spv.SDiv` ssa-use, ssa-use
+    sdiv-op ::= ssa-id `=` `spirv.SDiv` ssa-use, ssa-use
                            `:` integer-scalar-vector-type
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.SDiv %0, %1 : i32
-    %5 = spv.SDiv %2, %3 : vector<4xi32>
+    %4 = spirv.SDiv %0, %1 : i32
+    %5 = spirv.SDiv %2, %3 : vector<4xi32>
 
     ```
   }];
@@ -565,14 +565,14 @@ def SPV_SModOp : SPV_ArithmeticBinaryOp<"SMod",
     ```
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    smod-op ::= ssa-id `=` `spv.SMod` ssa-use, ssa-use
+    smod-op ::= ssa-id `=` `spirv.SMod` ssa-use, ssa-use
                            `:` integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %4 = spv.SMod %0, %1 : i32
-    %5 = spv.SMod %2, %3 : vector<4xi32>
+    %4 = spirv.SMod %0, %1 : i32
+    %5 = spirv.SMod %2, %3 : vector<4xi32>
 
     ```
   }];
@@ -599,8 +599,8 @@ def SPV_SNegateOp : SPV_ArithmeticUnaryOp<"SNegate",
     #### Example:
 
     ```mlir
-    %1 = spv.SNegate %0 : i32
-    %3 = spv.SNegate %2 : vector<4xi32>
+    %1 = spirv.SNegate %0 : i32
+    %3 = spirv.SNegate %2 : vector<4xi32>
     ```
   }];
 }
@@ -631,14 +631,14 @@ def SPV_SRemOp : SPV_ArithmeticBinaryOp<"SRem",
     ```
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    srem-op ::= ssa-id `=` `spv.SRem` ssa-use, ssa-use
+    srem-op ::= ssa-id `=` `spirv.SRem` ssa-use, ssa-use
                            `:` integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %4 = spv.SRem %0, %1 : i32
-    %5 = spv.SRem %2, %3 : vector<4xi32>
+    %4 = spirv.SRem %0, %1 : i32
+    %5 = spirv.SRem %2, %3 : vector<4xi32>
 
     ```
   }];
@@ -665,14 +665,14 @@ def SPV_UDivOp : SPV_ArithmeticBinaryOp<"UDiv",
     ```
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    udiv-op ::= ssa-id `=` `spv.UDiv` ssa-use, ssa-use
+    udiv-op ::= ssa-id `=` `spirv.UDiv` ssa-use, ssa-use
                            `:` integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %4 = spv.UDiv %0, %1 : i32
-    %5 = spv.UDiv %2, %3 : vector<4xi32>
+    %4 = spirv.UDiv %0, %1 : i32
+    %5 = spirv.UDiv %2, %3 : vector<4xi32>
 
     ```
   }];
@@ -696,7 +696,7 @@ def SPV_VectorTimesScalarOp : SPV_Op<"VectorTimesScalar", [NoSideEffect]> {
     #### Example:
 
     ```mlir
-    %0 = spv.VectorTimesScalar %vector, %scalar : vector<4xf32>
+    %0 = spirv.VectorTimesScalar %vector, %scalar : vector<4xf32>
     ```
   }];
 
@@ -733,14 +733,14 @@ def SPV_UModOp : SPV_ArithmeticBinaryOp<"UMod",
     ```
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    umod-op ::= ssa-id `=` `spv.UMod` ssa-use, ssa-use
+    umod-op ::= ssa-id `=` `spirv.UMod` ssa-use, ssa-use
                            `:` integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %4 = spv.UMod %0, %1 : i32
-    %5 = spv.UMod %2, %3 : vector<4xi32>
+    %4 = spirv.UMod %0, %1 : i32
+    %5 = spirv.UMod %2, %3 : vector<4xi32>
 
     ```
   }];

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVAtomicOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVAtomicOps.td
index 275ee45f1775..01f2b8c70bd3 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVAtomicOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVAtomicOps.td
@@ -79,15 +79,15 @@ def SPV_AtomicAndOp : SPV_AtomicUpdateWithValueOp<"AtomicAnd", []> {
     memory-semantics ::= `"None"` | `"Acquire"` | "Release"` | ...
 
     atomic-and-op ::=
-        `spv.AtomicAnd` scope memory-semantics
+        `spirv.AtomicAnd` scope memory-semantics
                         ssa-use `,` ssa-use `:` spv-pointer-type
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.AtomicAnd "Device" "None" %pointer, %value :
-                       !spv.ptr<i32, StorageBuffer>
+    %0 = spirv.AtomicAnd "Device" "None" %pointer, %value :
+                       !spirv.ptr<i32, StorageBuffer>
     ```
   }];
 }
@@ -131,7 +131,7 @@ def SPV_AtomicCompareExchangeOp : SPV_Op<"AtomicCompareExchange", []> {
 
     ```
     atomic-compare-exchange-op ::=
-        `spv.AtomicCompareExchange` scope memory-semantics memory-semantics
+        `spirv.AtomicCompareExchange` scope memory-semantics memory-semantics
                                     ssa-use `,` ssa-use `,` ssa-use
                                     `:` spv-pointer-type
     ```mlir
@@ -139,9 +139,9 @@ def SPV_AtomicCompareExchangeOp : SPV_Op<"AtomicCompareExchange", []> {
     #### Example:
 
     ```
-    %0 = spv.AtomicCompareExchange "Workgroup" "Acquire" "None"
+    %0 = spirv.AtomicCompareExchange "Workgroup" "Acquire" "None"
                                     %pointer, %value, %comparator
-                                    : !spv.ptr<i32, WorkGroup>
+                                    : !spirv.ptr<i32, WorkGroup>
     ```
   }];
 
@@ -173,7 +173,7 @@ def SPV_AtomicCompareExchangeWeakOp : SPV_Op<"AtomicCompareExchangeWeak", []> {
 
     ```
     atomic-compare-exchange-weak-op ::=
-        `spv.AtomicCompareExchangeWeak` scope memory-semantics memory-semantics
+        `spirv.AtomicCompareExchangeWeak` scope memory-semantics memory-semantics
                                         ssa-use `,` ssa-use `,` ssa-use
                                         `:` spv-pointer-type
     ```
@@ -181,9 +181,9 @@ def SPV_AtomicCompareExchangeWeakOp : SPV_Op<"AtomicCompareExchangeWeak", []> {
     #### Example:
 
     ```mlir
-    %0 = spv.AtomicCompareExchangeWeak "Workgroup" "Acquire" "None"
+    %0 = spirv.AtomicCompareExchangeWeak "Workgroup" "Acquire" "None"
                                        %pointer, %value, %comparator
-                                       : !spv.ptr<i32, WorkGroup>
+                                       : !spirv.ptr<i32, WorkGroup>
     ```
   }];
 
@@ -236,15 +236,15 @@ def SPV_AtomicExchangeOp : SPV_Op<"AtomicExchange", []> {
 
      ```
     atomic-exchange-op ::=
-        `spv.AtomicCompareExchange` scope memory-semantics
+        `spirv.AtomicCompareExchange` scope memory-semantics
                                     ssa-use `,` ssa-use `:` spv-pointer-type
     ```mlir
 
     #### Example:
 
     ```
-    %0 = spv.AtomicExchange "Workgroup" "Acquire" %pointer, %value,
-                            : !spv.ptr<i32, WorkGroup>
+    %0 = spirv.AtomicExchange "Workgroup" "Acquire" %pointer, %value,
+                            : !spirv.ptr<i32, WorkGroup>
     ```
   }];
 
@@ -290,15 +290,15 @@ def SPV_EXTAtomicFAddOp : SPV_ExtVendorOp<"AtomicFAdd", []> {
 
     ```
     atomic-fadd-op ::=
-        `spv.EXT.AtomicFAdd` scope memory-semantics
+        `spirv.EXT.AtomicFAdd` scope memory-semantics
                             ssa-use `,` ssa-use `:` spv-pointer-type
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.EXT.AtomicFAdd "Device" "None" %pointer, %value :
-                           !spv.ptr<f32, StorageBuffer>
+    %0 = spirv.EXT.AtomicFAdd "Device" "None" %pointer, %value :
+                           !spirv.ptr<f32, StorageBuffer>
     ```
   }];
 
@@ -349,15 +349,15 @@ def SPV_AtomicIAddOp : SPV_AtomicUpdateWithValueOp<"AtomicIAdd", []> {
 
     ```
     atomic-iadd-op ::=
-        `spv.AtomicIAdd` scope memory-semantics
+        `spirv.AtomicIAdd` scope memory-semantics
                          ssa-use `,` ssa-use `:` spv-pointer-type
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.AtomicIAdd "Device" "None" %pointer, %value :
-                        !spv.ptr<i32, StorageBuffer>
+    %0 = spirv.AtomicIAdd "Device" "None" %pointer, %value :
+                        !spirv.ptr<i32, StorageBuffer>
     ```
   }];
 }
@@ -389,15 +389,15 @@ def SPV_AtomicIDecrementOp : SPV_AtomicUpdateOp<"AtomicIDecrement", []> {
 
     ```
     atomic-idecrement-op ::=
-        `spv.AtomicIDecrement` scope memory-semantics ssa-use
+        `spirv.AtomicIDecrement` scope memory-semantics ssa-use
                                `:` spv-pointer-type
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.AtomicIDecrement "Device" "None" %pointer :
-                              !spv.ptr<i32, StorageBuffer>
+    %0 = spirv.AtomicIDecrement "Device" "None" %pointer :
+                              !spirv.ptr<i32, StorageBuffer>
     ```
   }];
 }
@@ -428,15 +428,15 @@ def SPV_AtomicIIncrementOp : SPV_AtomicUpdateOp<"AtomicIIncrement", []> {
 
     ```
     atomic-iincrement-op ::=
-        `spv.AtomicIIncrement` scope memory-semantics ssa-use
+        `spirv.AtomicIIncrement` scope memory-semantics ssa-use
                                `:` spv-pointer-type
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.AtomicIncrement "Device" "None" %pointer :
-                             !spv.ptr<i32, StorageBuffer>
+    %0 = spirv.AtomicIIncrement "Device" "None" %pointer :
+                             !spirv.ptr<i32, StorageBuffer>
     ```
   }];
 }
@@ -470,15 +470,15 @@ def SPV_AtomicISubOp : SPV_AtomicUpdateWithValueOp<"AtomicISub", []> {
 
     ```
     atomic-isub-op ::=
-        `spv.AtomicISub` scope memory-semantics
+        `spirv.AtomicISub` scope memory-semantics
                          ssa-use `,` ssa-use `:` spv-pointer-type
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.AtomicISub "Device" "None" %pointer, %value :
-                        !spv.ptr<i32, StorageBuffer>
+    %0 = spirv.AtomicISub "Device" "None" %pointer, %value :
+                        !spirv.ptr<i32, StorageBuffer>
     ```
   }];
 }
@@ -511,15 +511,15 @@ def SPV_AtomicOrOp : SPV_AtomicUpdateWithValueOp<"AtomicOr", []> {
 
     ```
     atomic-or-op ::=
-        `spv.AtomicOr` scope memory-semantics
+        `spirv.AtomicOr` scope memory-semantics
                        ssa-use `,` ssa-use `:` spv-pointer-type
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.AtomicOr "Device" "None" %pointer, %value :
-                      !spv.ptr<i32, StorageBuffer>
+    %0 = spirv.AtomicOr "Device" "None" %pointer, %value :
+                      !spirv.ptr<i32, StorageBuffer>
     ```
   }];
 }
@@ -553,15 +553,15 @@ def SPV_AtomicSMaxOp : SPV_AtomicUpdateWithValueOp<"AtomicSMax", []> {
 
     ```
     atomic-smax-op ::=
-        `spv.AtomicSMax` scope memory-semantics
+        `spirv.AtomicSMax` scope memory-semantics
                          ssa-use `,` ssa-use `:` spv-pointer-type
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.AtomicSMax "Device" "None" %pointer, %value :
-                        !spv.ptr<i32, StorageBuffer>
+    %0 = spirv.AtomicSMax "Device" "None" %pointer, %value :
+                        !spirv.ptr<i32, StorageBuffer>
     ```
   }];
 }
@@ -595,15 +595,15 @@ def SPV_AtomicSMinOp : SPV_AtomicUpdateWithValueOp<"AtomicSMin", []> {
 
     ```
     atomic-smin-op ::=
-        `spv.AtomicSMin` scope memory-semantics
+        `spirv.AtomicSMin` scope memory-semantics
                          ssa-use `,` ssa-use `:` spv-pointer-type
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.AtomicSMin "Device" "None" %pointer, %value :
-                        !spv.ptr<i32, StorageBuffer>
+    %0 = spirv.AtomicSMin "Device" "None" %pointer, %value :
+                        !spirv.ptr<i32, StorageBuffer>
     ```
   }];
 }
@@ -637,15 +637,15 @@ def SPV_AtomicUMaxOp : SPV_AtomicUpdateWithValueOp<"AtomicUMax", [UnsignedOp]> {
 
     ```
     atomic-umax-op ::=
-        `spv.AtomicUMax` scope memory-semantics
+        `spirv.AtomicUMax` scope memory-semantics
                          ssa-use `,` ssa-use `:` spv-pointer-type
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.AtomicUMax "Device" "None" %pointer, %value :
-                        !spv.ptr<i32, StorageBuffer>
+    %0 = spirv.AtomicUMax "Device" "None" %pointer, %value :
+                        !spirv.ptr<i32, StorageBuffer>
     ```
   }];
 }
@@ -679,15 +679,15 @@ def SPV_AtomicUMinOp : SPV_AtomicUpdateWithValueOp<"AtomicUMin", [UnsignedOp]> {
 
     ```
     atomic-umin-op ::=
-        `spv.AtomicUMin` scope memory-semantics
+        `spirv.AtomicUMin` scope memory-semantics
                          ssa-use `,` ssa-use `:` spv-pointer-type
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.AtomicUMin "Device" "None" %pointer, %value :
-                        !spv.ptr<i32, StorageBuffer>
+    %0 = spirv.AtomicUMin "Device" "None" %pointer, %value :
+                        !spirv.ptr<i32, StorageBuffer>
     ```
   }];
 }
@@ -721,15 +721,15 @@ def SPV_AtomicXorOp : SPV_AtomicUpdateWithValueOp<"AtomicXor", []> {
 
     ```
     atomic-xor-op ::=
-        `spv.AtomicXor` scope memory-semantics
+        `spirv.AtomicXor` scope memory-semantics
                         ssa-use `,` ssa-use `:` spv-pointer-type
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.AtomicXor "Device" "None" %pointer, %value :
-                       !spv.ptr<i32, StorageBuffer>
+    %0 = spirv.AtomicXor "Device" "None" %pointer, %value :
+                       !spirv.ptr<i32, StorageBuffer>
     ```
   }];
 }

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVAttributes.h b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVAttributes.h
index 990a4901a160..3afbaf27dde0 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVAttributes.h
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVAttributes.h
@@ -58,7 +58,7 @@ class InterfaceVarABIAttr
   static InterfaceVarABIAttr get(IntegerAttr descriptorSet, IntegerAttr binding,
                                  IntegerAttr storageClass);
 
-  /// Returns the attribute kind's name (without the 'spv.' prefix).
+  /// Returns the attribute kind's name (without the 'spirv.' prefix).
   static StringRef getKindName();
 
   /// Returns descriptor set.
@@ -90,7 +90,7 @@ class VerCapExtAttr
   static VerCapExtAttr get(IntegerAttr version, ArrayAttr capabilities,
                            ArrayAttr extensions);
 
-  /// Returns the attribute kind's name (without the 'spv.' prefix).
+  /// Returns the attribute kind's name (without the 'spirv.' prefix).
   static StringRef getKindName();
 
   /// Returns the version.
@@ -142,7 +142,7 @@ class TargetEnvAttr
                            DeviceType deviceType, uint32_t deviceId,
                            ResourceLimitsAttr limits);
 
-  /// Returns the attribute kind's name (without the 'spv.' prefix).
+  /// Returns the attribute kind's name (without the 'spirv.' prefix).
   static StringRef getKindName();
 
   /// Returns the (version, capabilities, extensions) triple attribute.

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBarrierOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBarrierOps.td
index 09e547df4705..4fbb83eb1b00 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBarrierOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBarrierOps.td
@@ -58,13 +58,13 @@ def SPV_ControlBarrierOp : SPV_Op<"ControlBarrier", []> {
 
     memory-semantics ::= `"None"` | `"Acquire"` | `"Release"` | ...
 
-    control-barrier-op ::= `spv.ControlBarrier` scope, scope, memory-semantics
+    control-barrier-op ::= `spirv.ControlBarrier` scope, scope, memory-semantics
     ```
 
     #### Example:
 
     ```mlir
-    spv.ControlBarrier "Workgroup", "Device", "Acquire|UniformMemory"
+    spirv.ControlBarrier "Workgroup", "Device", "Acquire|UniformMemory"
 
     ```
   }];
@@ -109,13 +109,13 @@ def SPV_MemoryBarrierOp : SPV_Op<"MemoryBarrier", []> {
 
     memory-semantics ::= `"None"` | `"Acquire"` | `"Release"` | ...
 
-    memory-barrier-op ::= `spv.MemoryBarrier` scope, memory-semantics
+    memory-barrier-op ::= `spirv.MemoryBarrier` scope, memory-semantics
     ```
 
     #### Example:
 
     ```mlir
-    spv.MemoryBarrier "Device", "Acquire|UniformMemory"
+    spirv.MemoryBarrier "Device", "Acquire|UniformMemory"
 
     ```
   }];

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
index 14559bd40bab..41b556387c9c 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBase.td
@@ -24,7 +24,7 @@ include "mlir/Dialect/SPIRV/IR/SPIRVAvailability.td"
 //===----------------------------------------------------------------------===//
 
 def SPIRV_Dialect : Dialect {
-  let name = "spv";
+  let name = "spirv";
 
   let summary = "The SPIR-V dialect in MLIR.";
 

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBitOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBitOps.td
index 916bb5569877..472bc454e755 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBitOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVBitOps.td
@@ -85,15 +85,15 @@ def SPV_BitCountOp : SPV_BitUnaryOp<"BitCount", []> {
     ```
     integer-scalar-vector-type ::= integer-type |
                                   `vector<` integer-literal `x` integer-type `>`
-    bitcount-op ::= ssa-id `=` `spv.BitCount` ssa-use
+    bitcount-op ::= ssa-id `=` `spirv.BitCount` ssa-use
                                `:` integer-scalar-vector-type
     ```
 
     #### Example:
 
     ```mlir
-    %2 = spv.BitCount %0: i32
-    %3 = spv.BitCount %1: vector<4xi32>
+    %2 = spirv.BitCount %0: i32
+    %3 = spirv.BitCount %1: vector<4xi32>
     ```
   }];
 }
@@ -135,7 +135,7 @@ def SPV_BitFieldInsertOp : SPV_Op<"BitFieldInsert",
     ```
     integer-scalar-vector-type ::= integer-type |
                                   `vector<` integer-literal `x` integer-type `>`
-    bitfield-insert-op ::= ssa-id `=` `spv.BitFieldInsert` ssa-use `,` ssa-use
+    bitfield-insert-op ::= ssa-id `=` `spirv.BitFieldInsert` ssa-use `,` ssa-use
                                       `,` ssa-use `,` ssa-use
                                       `:` integer-scalar-vector-type
                                       `,` integer-type `,` integer-type
@@ -144,7 +144,7 @@ def SPV_BitFieldInsertOp : SPV_Op<"BitFieldInsert",
     #### Example:
 
     ```mlir
-    %0 = spv.BitFieldInsert %base, %insert, %offset, %count : vector<3xi32>, i8, i8
+    %0 = spirv.BitFieldInsert %base, %insert, %offset, %count : vector<3xi32>, i8, i8
     ```
   }];
 
@@ -207,7 +207,7 @@ def SPV_BitFieldSExtractOp : SPV_BitFieldExtractOp<"BitFieldSExtract",
     ```
     integer-scalar-vector-type ::= integer-type |
                                   `vector<` integer-literal `x` integer-type `>`
-    bitfield-extract-s-op ::= ssa-id `=` `spv.BitFieldSExtract` ssa-use
+    bitfield-extract-s-op ::= ssa-id `=` `spirv.BitFieldSExtract` ssa-use
                                          `,` ssa-use `,` ssa-use
                                          `:` integer-scalar-vector-type
                                          `,` integer-type `,` integer-type
@@ -216,7 +216,7 @@ def SPV_BitFieldSExtractOp : SPV_BitFieldExtractOp<"BitFieldSExtract",
     #### Example:
 
     ```mlir
-    %0 = spv.BitFieldSExtract %base, %offset, %count : vector<3xi32>, i8, i8
+    %0 = spirv.BitFieldSExtract %base, %offset, %count : vector<3xi32>, i8, i8
     ```
   }];
 
@@ -244,7 +244,7 @@ def SPV_BitFieldUExtractOp : SPV_BitFieldExtractOp<"BitFieldUExtract",
     ```
     integer-scalar-vector-type ::= integer-type |
                                   `vector<` integer-literal `x` integer-type `>`
-    bitfield-extract-u-op ::= ssa-id `=` `spv.BitFieldUExtract` ssa-use
+    bitfield-extract-u-op ::= ssa-id `=` `spirv.BitFieldUExtract` ssa-use
                                          `,` ssa-use `,` ssa-use
                                          `:` integer-scalar-vector-type
                                          `,` integer-type `,` integer-type
@@ -253,7 +253,7 @@ def SPV_BitFieldUExtractOp : SPV_BitFieldExtractOp<"BitFieldUExtract",
     #### Example:
 
     ```mlir
-    %0 = spv.BitFieldUExtract %base, %offset, %count : vector<3xi32>, i8, i8
+    %0 = spirv.BitFieldUExtract %base, %offset, %count : vector<3xi32>, i8, i8
     ```
   }];
 
@@ -285,15 +285,15 @@ def SPV_BitReverseOp : SPV_BitUnaryOp<"BitReverse", []> {
     ```
     integer-scalar-vector-type ::= integer-type |
                                    `vector<` integer-literal `x` integer-type `>`
-    bitreverse-op ::= ssa-id `=` `spv.BitReverse` ssa-use
+    bitreverse-op ::= ssa-id `=` `spirv.BitReverse` ssa-use
                                  `:` integer-scalar-vector-type
     ```
 
     #### Example:
 
     ```mlir
-    %2 = spv.BitReverse %0 : i32
-    %3 = spv.BitReverse %1 : vector<4xi32>
+    %2 = spirv.BitReverse %0 : i32
+    %3 = spirv.BitReverse %1 : vector<4xi32>
     ```
   }];
 
@@ -327,15 +327,15 @@ def SPV_BitwiseAndOp : SPV_BitBinaryOp<"BitwiseAnd",
     ```
     integer-scalar-vector-type ::= integer-type |
                                   `vector<` integer-literal `x` integer-type `>`
-    bitwise-and-op ::= ssa-id `=` `spv.BitwiseAnd` ssa-use, ssa-use
+    bitwise-and-op ::= ssa-id `=` `spirv.BitwiseAnd` ssa-use, ssa-use
                                   `:` integer-scalar-vector-type
     ```
 
     #### Example:
 
     ```mlir
-    %2 = spv.BitwiseAnd %0, %1 : i32
-    %2 = spv.BitwiseAnd %0, %1 : vector<4xi32>
+    %2 = spirv.BitwiseAnd %0, %1 : i32
+    %2 = spirv.BitwiseAnd %0, %1 : vector<4xi32>
     ```
   }];
 }
@@ -362,15 +362,15 @@ def SPV_BitwiseOrOp : SPV_BitBinaryOp<"BitwiseOr",
     ```
     integer-scalar-vector-type ::= integer-type |
                                   `vector<` integer-literal `x` integer-type `>`
-    bitwise-or-op ::= ssa-id `=` `spv.BitwiseOr` ssa-use, ssa-use
+    bitwise-or-op ::= ssa-id `=` `spirv.BitwiseOr` ssa-use, ssa-use
                                   `:` integer-scalar-vector-type
     ```
 
     #### Example:
 
     ```mlir
-    %2 = spv.BitwiseOr %0, %1 : i32
-    %2 = spv.BitwiseOr %0, %1 : vector<4xi32>
+    %2 = spirv.BitwiseOr %0, %1 : i32
+    %2 = spirv.BitwiseOr %0, %1 : vector<4xi32>
     ```
   }];
 }
@@ -397,15 +397,15 @@ def SPV_BitwiseXorOp : SPV_BitBinaryOp<"BitwiseXor",
     ```
     integer-scalar-vector-type ::= integer-type |
                                   `vector<` integer-literal `x` integer-type `>`
-    bitwise-xor-op ::= ssa-id `=` `spv.BitwiseXor` ssa-use, ssa-use
+    bitwise-xor-op ::= ssa-id `=` `spirv.BitwiseXor` ssa-use, ssa-use
                                   `:` integer-scalar-vector-type
     ```
 
     #### Example:
 
     ```mlir
-    %2 = spv.BitwiseXor %0, %1 : i32
-    %2 = spv.BitwiseXor %0, %1 : vector<4xi32>
+    %2 = spirv.BitwiseXor %0, %1 : i32
+    %2 = spirv.BitwiseXor %0, %1 : vector<4xi32>
     ```
   }];
 }
@@ -440,7 +440,7 @@ def SPV_ShiftLeftLogicalOp : SPV_ShiftOp<"ShiftLeftLogical",
     ```
     integer-scalar-vector-type ::= integer-type |
                                   `vector<` integer-literal `x` integer-type `>`
-    shift-left-logical-op ::= ssa-id `=` `spv.ShiftLeftLogical`
+    shift-left-logical-op ::= ssa-id `=` `spirv.ShiftLeftLogical`
                                           ssa-use `,` ssa-use `:`
                                           integer-scalar-vector-type `,`
                                           integer-scalar-vector-type
@@ -449,8 +449,8 @@ def SPV_ShiftLeftLogicalOp : SPV_ShiftOp<"ShiftLeftLogical",
     #### Example:
 
     ```mlir
-    %2 = spv.ShiftLeftLogical %0, %1 : i32, i16
-    %5 = spv.ShiftLeftLogical %3, %4 : vector<3xi32>, vector<3xi16>
+    %2 = spirv.ShiftLeftLogical %0, %1 : i32, i16
+    %5 = spirv.ShiftLeftLogical %3, %4 : vector<3xi32>, vector<3xi16>
     ```
   }];
 }
@@ -482,7 +482,7 @@ def SPV_ShiftRightArithmeticOp : SPV_ShiftOp<"ShiftRightArithmetic",
     ```
     integer-scalar-vector-type ::= integer-type |
                                   `vector<` integer-literal `x` integer-type `>`
-    shift-right-arithmetic-op ::= ssa-id `=` `spv.ShiftRightArithmetic`
+    shift-right-arithmetic-op ::= ssa-id `=` `spirv.ShiftRightArithmetic`
                                               ssa-use `,` ssa-use `:`
                                               integer-scalar-vector-type `,`
                                               integer-scalar-vector-type
@@ -491,8 +491,8 @@ def SPV_ShiftRightArithmeticOp : SPV_ShiftOp<"ShiftRightArithmetic",
     #### Example:
 
     ```mlir
-    %2 = spv.ShiftRightArithmetic %0, %1 : i32, i16
-    %5 = spv.ShiftRightArithmetic %3, %4 : vector<3xi32>, vector<3xi16>
+    %2 = spirv.ShiftRightArithmetic %0, %1 : i32, i16
+    %5 = spirv.ShiftRightArithmetic %3, %4 : vector<3xi32>, vector<3xi16>
     ```
   }];
 }
@@ -525,7 +525,7 @@ def SPV_ShiftRightLogicalOp : SPV_ShiftOp<"ShiftRightLogical",
     ```
     integer-scalar-vector-type ::= integer-type |
                                   `vector<` integer-literal `x` integer-type `>`
-    shift-right-logical-op ::= ssa-id `=` `spv.ShiftRightLogical`
+    shift-right-logical-op ::= ssa-id `=` `spirv.ShiftRightLogical`
                                            ssa-use `,` ssa-use `:`
                                            integer-scalar-vector-type `,`
                                            integer-scalar-vector-type
@@ -534,8 +534,8 @@ def SPV_ShiftRightLogicalOp : SPV_ShiftOp<"ShiftRightLogical",
     #### Example:
 
     ```mlir
-    %2 = spv.ShiftRightLogical %0, %1 : i32, i16
-    %5 = spv.ShiftRightLogical %3, %4 : vector<3xi32>, vector<3xi16>
+    %2 = spirv.ShiftRightLogical %0, %1 : i32, i16
+    %5 = spirv.ShiftRightLogical %3, %4 : vector<3xi32>, vector<3xi16>
     ```
   }];
 }
@@ -559,14 +559,14 @@ def SPV_NotOp : SPV_BitUnaryOp<"Not", [UsableInSpecConstantOp]> {
     ```
     integer-scalar-vector-type ::= integer-type |
                                   `vector<` integer-literal `x` integer-type `>`
-    not-op ::= ssa-id `=` `spv.BitNot` ssa-use `:` integer-scalar-vector-type
+    not-op ::= ssa-id `=` `spirv.BitNot` ssa-use `:` integer-scalar-vector-type
     ```
 
     #### Example:
 
     ```mlir
-    %2 = spv.Not %0 : i32
-    %3 = spv.Not %1 : vector<4xi32>
+    %2 = spirv.Not %0 : i32
+    %3 = spirv.Not %1 : vector<4xi32>
     ```
   }];
 }

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCLOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCLOps.td
index d95ed47bf486..a38b4dade637 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCLOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCLOps.td
@@ -131,15 +131,15 @@ def SPV_CLCeilOp : SPV_CLUnaryArithmeticOp<"ceil", 12, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    ceil-op ::= ssa-id `=` `spv.CL.ceil` ssa-use `:`
+    ceil-op ::= ssa-id `=` `spirv.CL.ceil` ssa-use `:`
                float-scalar-vector-type
     ```
 
     #### Example:
 
     ```
-    %2 = spv.CL.ceil %0 : f32
-    %3 = spv.CL.ceil %1 : vector<3xf16>
+    %2 = spirv.CL.ceil %0 : f32
+    %3 = spirv.CL.ceil %1 : vector<3xf16>
     ```
   }];
 }
@@ -161,15 +161,15 @@ def SPV_CLCosOp : SPV_CLUnaryArithmeticOp<"cos", 14, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    cos-op ::= ssa-id `=` `spv.CL.cos` ssa-use `:`
+    cos-op ::= ssa-id `=` `spirv.CL.cos` ssa-use `:`
                float-scalar-vector-type
     ```
 
     #### Example:
 
     ```
-    %2 = spv.CL.cos %0 : f32
-    %3 = spv.CL.cos %1 : vector<3xf16>
+    %2 = spirv.CL.cos %0 : f32
+    %3 = spirv.CL.cos %1 : vector<3xf16>
     ```
   }];
 }
@@ -193,15 +193,15 @@ def SPV_CLErfOp : SPV_CLUnaryArithmeticOp<"erf", 18, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    erf-op ::= ssa-id `=` `spv.CL.erf` ssa-use `:`
+    erf-op ::= ssa-id `=` `spirv.CL.erf` ssa-use `:`
                float-scalar-vector-type
     ```
 
     #### Example:
 
     ```
-    %2 = spv.CL.erf %0 : f32
-    %3 = spv.CL.erf %1 : vector<3xf16>
+    %2 = spirv.CL.erf %0 : f32
+    %3 = spirv.CL.erf %1 : vector<3xf16>
     ```
   }];
 }
@@ -224,14 +224,14 @@ def SPV_CLExpOp : SPV_CLUnaryArithmeticOp<"exp", 19, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    exp-op ::= ssa-id `=` `spv.CL.exp` ssa-use `:`
+    exp-op ::= ssa-id `=` `spirv.CL.exp` ssa-use `:`
                float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.CL.exp %0 : f32
-    %3 = spv.CL.exp %1 : vector<3xf16>
+    %2 = spirv.CL.exp %0 : f32
+    %3 = spirv.CL.exp %1 : vector<3xf16>
     ```
   }];
 }
@@ -254,14 +254,14 @@ def SPV_CLFAbsOp : SPV_CLUnaryArithmeticOp<"fabs", 23, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    abs-op ::= ssa-id `=` `spv.CL.fabs` ssa-use `:`
+    abs-op ::= ssa-id `=` `spirv.CL.fabs` ssa-use `:`
                float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.CL.fabs %0 : f32
-    %3 = spv.CL.fabs %1 : vector<3xf16>
+    %2 = spirv.CL.fabs %0 : f32
+    %3 = spirv.CL.fabs %1 : vector<3xf16>
     ```
   }];
 }
@@ -286,15 +286,15 @@ def SPV_CLFloorOp : SPV_CLUnaryArithmeticOp<"floor", 25, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    floor-op ::= ssa-id `=` `spv.CL.floor` ssa-use `:`
+    floor-op ::= ssa-id `=` `spirv.CL.floor` ssa-use `:`
                float-scalar-vector-type
     ```
 
     #### Example:
 
     ```
-    %2 = spv.CL.floor %0 : f32
-    %3 = spv.CL.ceifloorl %1 : vector<3xf16>
+    %2 = spirv.CL.floor %0 : f32
+    %3 = spirv.CL.floor %1 : vector<3xf16>
     ```
   }];
 }
@@ -319,13 +319,13 @@ def SPV_CLFmaOp : SPV_CLTernaryArithmeticOp<"fma", 26, SPV_Float> {
     <!-- End of AutoGen section -->
 
     ```
-    fma-op ::= ssa-id `=` `spv.CL.fma` ssa-use, ssa-use, ssa-use `:`
+    fma-op ::= ssa-id `=` `spirv.CL.fma` ssa-use, ssa-use, ssa-use `:`
                float-scalar-vector-type
     ```
 
     ```
-    %0 = spv.CL.fma %a, %b, %c : f32
-    %1 = spv.CL.fma %a, %b, %c : vector<3xf16>
+    %0 = spirv.CL.fma %a, %b, %c : f32
+    %1 = spirv.CL.fma %a, %b, %c : vector<3xf16>
     ```
   }];
 }
@@ -349,14 +349,14 @@ def SPV_CLFMaxOp : SPV_CLBinaryArithmeticOp<"fmax", 27, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    fmax-op ::= ssa-id `=` `spv.CL.fmax` ssa-use `:`
+    fmax-op ::= ssa-id `=` `spirv.CL.fmax` ssa-use `:`
                 float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.CL.fmax %0, %1 : f32
-    %3 = spv.CL.fmax %0, %1 : vector<3xf16>
+    %2 = spirv.CL.fmax %0, %1 : f32
+    %3 = spirv.CL.fmax %0, %1 : vector<3xf16>
     ```
   }];
 }
@@ -379,14 +379,14 @@ def SPV_CLFMinOp : SPV_CLBinaryArithmeticOp<"fmin", 28, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    fmin-op ::= ssa-id `=` `spv.CL.fmin` ssa-use `:`
+    fmin-op ::= ssa-id `=` `spirv.CL.fmin` ssa-use `:`
                 float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.CL.fmin %0, %1 : f32
-    %3 = spv.CL.fmin %0, %1 : vector<3xf16>
+    %2 = spirv.CL.fmin %0, %1 : f32
+    %3 = spirv.CL.fmin %0, %1 : vector<3xf16>
     ```
   }];
 }
@@ -408,15 +408,15 @@ def SPV_CLLogOp : SPV_CLUnaryArithmeticOp<"log", 37, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    log-op ::= ssa-id `=` `spv.CL.log` ssa-use `:`
+    log-op ::= ssa-id `=` `spirv.CL.log` ssa-use `:`
                float-scalar-vector-type
     ```
 
     #### Example:
 
     ```
-    %2 = spv.CL.log %0 : f32
-    %3 = spv.CL.log %1 : vector<3xf16>
+    %2 = spirv.CL.log %0 : f32
+    %3 = spirv.CL.log %1 : vector<3xf16>
     ```
   }];
 }
@@ -440,14 +440,14 @@ def SPV_CLPowOp : SPV_CLBinaryArithmeticOp<"pow", 48, SPV_Float> {
     restricted-float-scalar-vector-type ::=
       restricted-float-scalar-type |
       `vector<` integer-literal `x` restricted-float-scalar-type `>`
-    pow-op ::= ssa-id `=` `spv.CL.pow` ssa-use `:`
+    pow-op ::= ssa-id `=` `spirv.CL.pow` ssa-use `:`
                restricted-float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.CL.pow %0, %1 : f32
-    %3 = spv.CL.pow %0, %1 : vector<3xf16>
+    %2 = spirv.CL.pow %0, %1 : f32
+    %3 = spirv.CL.pow %0, %1 : vector<3xf16>
     ```
   }];
 }
@@ -472,14 +472,14 @@ def SPV_CLRoundOp : SPV_CLUnaryArithmeticOp<"round", 55, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    round-op ::= ssa-id `=` `spv.CL.round` ssa-use `:`
+    round-op ::= ssa-id `=` `spirv.CL.round` ssa-use `:`
                float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.CL.round %0 : f32
-    %3 = spv.CL.round %0 : vector<3xf16>
+    %2 = spirv.CL.round %0 : f32
+    %3 = spirv.CL.round %0 : vector<3xf16>
     ```
   }];
 }
@@ -501,15 +501,15 @@ def SPV_CLRsqrtOp : SPV_CLUnaryArithmeticOp<"rsqrt", 56, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    rsqrt-op ::= ssa-id `=` `spv.CL.rsqrt` ssa-use `:`
+    rsqrt-op ::= ssa-id `=` `spirv.CL.rsqrt` ssa-use `:`
                float-scalar-vector-type
     ```
 
     #### Example:
 
     ```
-    %2 = spv.CL.rsqrt %0 : f32
-    %3 = spv.CL.rsqrt %1 : vector<3xf16>
+    %2 = spirv.CL.rsqrt %0 : f32
+    %3 = spirv.CL.rsqrt %1 : vector<3xf16>
     ```
   }];
 }
@@ -531,15 +531,15 @@ def SPV_CLSinOp : SPV_CLUnaryArithmeticOp<"sin", 57, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    sin-op ::= ssa-id `=` `spv.CL.sin` ssa-use `:`
+    sin-op ::= ssa-id `=` `spirv.CL.sin` ssa-use `:`
                float-scalar-vector-type
     ```
 
     #### Example:
 
     ```
-    %2 = spv.CL.sin %0 : f32
-    %3 = spv.CL.sin %1 : vector<3xf16>
+    %2 = spirv.CL.sin %0 : f32
+    %3 = spirv.CL.sin %1 : vector<3xf16>
     ```
   }];
 }
@@ -561,15 +561,15 @@ def SPV_CLSqrtOp : SPV_CLUnaryArithmeticOp<"sqrt", 61, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    sqrt-op ::= ssa-id `=` `spv.CL.sqrt` ssa-use `:`
+    sqrt-op ::= ssa-id `=` `spirv.CL.sqrt` ssa-use `:`
                float-scalar-vector-type
     ```
 
     #### Example:
 
     ```
-    %2 = spv.CL.sqrt %0 : f32
-    %3 = spv.CL.sqrt %1 : vector<3xf16>
+    %2 = spirv.CL.sqrt %0 : f32
+    %3 = spirv.CL.sqrt %1 : vector<3xf16>
     ```
   }];
 }
@@ -591,15 +591,15 @@ def SPV_CLTanhOp : SPV_CLUnaryArithmeticOp<"tanh", 63, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    tanh-op ::= ssa-id `=` `spv.CL.tanh` ssa-use `:`
+    tanh-op ::= ssa-id `=` `spirv.CL.tanh` ssa-use `:`
                float-scalar-vector-type
     ```
 
     #### Example:
 
     ```
-    %2 = spv.CL.tanh %0 : f32
-    %3 = spv.CL.tanh %1 : vector<3xf16>
+    %2 = spirv.CL.tanh %0 : f32
+    %3 = spirv.CL.tanh %1 : vector<3xf16>
     ```
   }];
 }
@@ -622,14 +622,14 @@ def SPV_CLSAbsOp : SPV_CLUnaryArithmeticOp<"s_abs", 141, SPV_Integer> {
     ```
     integer-scalar-vector-type ::= integer-type |
                                    `vector<` integer-literal `x` integer-type `>`
-    abs-op ::= ssa-id `=` `spv.CL.s_abs` ssa-use `:`
+    abs-op ::= ssa-id `=` `spirv.CL.s_abs` ssa-use `:`
                integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.CL.s_abs %0 : i32
-    %3 = spv.CL.s_abs %1 : vector<3xi16>
+    %2 = spirv.CL.s_abs %0 : i32
+    %3 = spirv.CL.s_abs %1 : vector<3xi16>
     ```
   }];
 }
@@ -650,13 +650,13 @@ def SPV_CLSMaxOp : SPV_CLBinaryArithmeticOp<"s_max", 156, SPV_Integer> {
     ```
     integer-scalar-vector-type ::= integer-type |
                                    `vector<` integer-literal `x` integer-type `>`
-    smax-op ::= ssa-id `=` `spv.CL.s_max` ssa-use `:`
+    smax-op ::= ssa-id `=` `spirv.CL.s_max` ssa-use `:`
                 integer-scalar-vector-type
     ```
     #### Example:
     ```mlir
-    %2 = spv.CL.s_max %0, %1 : i32
-    %3 = spv.CL.s_max %0, %1 : vector<3xi16>
+    %2 = spirv.CL.s_max %0, %1 : i32
+    %3 = spirv.CL.s_max %0, %1 : vector<3xi16>
     ```
   }];
 }
@@ -677,13 +677,13 @@ def SPV_CLUMaxOp : SPV_CLBinaryArithmeticOp<"u_max", 157, SPV_Integer> {
     ```
     integer-scalar-vector-type ::= integer-type |
                                    `vector<` integer-literal `x` integer-type `>`
-    umax-op ::= ssa-id `=` `spv.CL.u_max` ssa-use `:`
+    umax-op ::= ssa-id `=` `spirv.CL.u_max` ssa-use `:`
                 integer-scalar-vector-type
     ```
     #### Example:
     ```mlir
-    %2 = spv.CL.u_max %0, %1 : i32
-    %3 = spv.CL.u_max %0, %1 : vector<3xi16>
+    %2 = spirv.CL.u_max %0, %1 : i32
+    %3 = spirv.CL.u_max %0, %1 : vector<3xi16>
     ```
   }];
 }
@@ -702,13 +702,13 @@ def SPV_CLSMinOp : SPV_CLBinaryArithmeticOp<"s_min", 158, SPV_Integer> {
     ```
     integer-scalar-vector-type ::= integer-type |
                                    `vector<` integer-literal `x` integer-type `>`
-    smin-op ::= ssa-id `=` `spv.CL.s_min` ssa-use `:`
+    smin-op ::= ssa-id `=` `spirv.CL.s_min` ssa-use `:`
                 integer-scalar-vector-type
     ```
     #### Example:
     ```mlir
-    %2 = spv.CL.s_min %0, %1 : i32
-    %3 = spv.CL.s_min %0, %1 : vector<3xi16>
+    %2 = spirv.CL.s_min %0, %1 : i32
+    %3 = spirv.CL.s_min %0, %1 : vector<3xi16>
     ```
   }];
 }
@@ -729,13 +729,13 @@ def SPV_CLUMinOp : SPV_CLBinaryArithmeticOp<"u_min", 159, SPV_Integer> {
     ```
     integer-scalar-vector-type ::= integer-type |
                                    `vector<` integer-literal `x` integer-type `>`
-    umin-op ::= ssa-id `=` `spv.CL.u_min` ssa-use `:`
+    umin-op ::= ssa-id `=` `spirv.CL.u_min` ssa-use `:`
                 integer-scalar-vector-type
     ```
     #### Example:
     ```mlir
-    %2 = spv.CL.u_min %0, %1 : i32
-    %3 = spv.CL.u_min %0, %1 : vector<3xi16>
+    %2 = spirv.CL.u_min %0, %1 : i32
+    %3 = spirv.CL.u_min %0, %1 : vector<3xi16>
     ```
   }];
 }

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCastOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCastOps.td
index 99a54067e481..f45a2175e7c8 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCastOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCastOps.td
@@ -64,16 +64,16 @@ def SPV_BitcastOp : SPV_Op<"Bitcast", [NoSideEffect]> {
     <!-- End of AutoGen section -->
 
     ```
-    bitcast-op ::= ssa-id `=` `spv.Bitcast` ssa-use
+    bitcast-op ::= ssa-id `=` `spirv.Bitcast` ssa-use
                    `:` operand-type `to` result-type
     ```
 
     #### Example:
 
     ```mlir
-    %1 = spv.Bitcast %0 : f32 to i32
-    %1 = spv.Bitcast %0 : vector<2xf32> to i64
-    %1 = spv.Bitcast %0 : !spv.ptr<f32, Function> to !spv.ptr<i32, Function>
+    %1 = spirv.Bitcast %0 : f32 to i32
+    %1 = spirv.Bitcast %0 : vector<2xf32> to i64
+    %1 = spirv.Bitcast %0 : !spirv.ptr<f32, Function> to !spirv.ptr<i32, Function>
     ```
   }];
 
@@ -110,15 +110,15 @@ def SPV_ConvertFToSOp : SPV_CastOp<"ConvertFToS", SPV_Integer, SPV_Float, []> {
     <!-- End of AutoGen section -->
 
     ```
-    convert-f-to-s-op ::= ssa-id `=` `spv.ConvertFToSOp` ssa-use
+    convert-f-to-s-op ::= ssa-id `=` `spirv.ConvertFToS` ssa-use
                           `:` operand-type `to` result-type
     ```
 
     #### Example:
 
     ```mlir
-    %1 = spv.ConvertFToS %0 : f32 to i32
-    %3 = spv.ConvertFToS %2 : vector<3xf32> to vector<3xi32>
+    %1 = spirv.ConvertFToS %0 : f32 to i32
+    %3 = spirv.ConvertFToS %2 : vector<3xf32> to vector<3xi32>
     ```
   }];
 }
@@ -143,15 +143,15 @@ def SPV_ConvertFToUOp : SPV_CastOp<"ConvertFToU", SPV_Integer, SPV_Float, []> {
     <!-- End of AutoGen section -->
 
     ```
-    convert-f-to-u-op ::= ssa-id `=` `spv.ConvertFToUOp` ssa-use
+    convert-f-to-u-op ::= ssa-id `=` `spirv.ConvertFToU` ssa-use
                           `:` operand-type `to` result-type
     ```
 
     #### Example:
 
     ```mlir
-    %1 = spv.ConvertFToU %0 : f32 to i32
-    %3 = spv.ConvertFToU %2 : vector<3xf32> to vector<3xi32>
+    %1 = spirv.ConvertFToU %0 : f32 to i32
+    %3 = spirv.ConvertFToU %2 : vector<3xf32> to vector<3xi32>
     ```
   }];
 }
@@ -177,15 +177,15 @@ def SPV_ConvertSToFOp : SPV_CastOp<"ConvertSToF",
     <!-- End of AutoGen section -->
 
     ```
-    convert-s-to-f-op ::= ssa-id `=` `spv.ConvertSToFOp` ssa-use
+    convert-s-to-f-op ::= ssa-id `=` `spirv.ConvertSToF` ssa-use
                           `:` operand-type `to` result-type
     ```
 
     #### Example:
 
     ```mlir
-    %1 = spv.ConvertSToF %0 : i32 to f32
-    %3 = spv.ConvertSToF %2 : vector<3xi32> to vector<3xf32>
+    %1 = spirv.ConvertSToF %0 : i32 to f32
+    %3 = spirv.ConvertSToF %2 : vector<3xi32> to vector<3xf32>
     ```
   }];
 }
@@ -211,15 +211,15 @@ def SPV_ConvertUToFOp : SPV_CastOp<"ConvertUToF",
     <!-- End of AutoGen section -->
 
     ```
-    convert-u-to-f-op ::= ssa-id `=` `spv.ConvertUToFOp` ssa-use
+    convert-u-to-f-op ::= ssa-id `=` `spirv.ConvertUToF` ssa-use
                           `:` operand-type `to` result-type
     ```
 
     #### Example:
 
     ```mlir
-    %1 = spv.ConvertUToF %0 : i32 to f32
-    %3 = spv.ConvertUToF %2 : vector<3xi32> to vector<3xf32>
+    %1 = spirv.ConvertUToF %0 : i32 to f32
+    %3 = spirv.ConvertUToF %2 : vector<3xi32> to vector<3xf32>
     ```
   }];
 }
@@ -247,15 +247,15 @@ def SPV_FConvertOp : SPV_CastOp<"FConvert",
     <!-- End of AutoGen section -->
 
     ```
-    f-convert-op ::= ssa-id `=` `spv.FConvertOp` ssa-use
+    f-convert-op ::= ssa-id `=` `spirv.FConvert` ssa-use
                      `:` operand-type `to` result-type
     ```
 
     #### Example:
 
     ```mlir
-    %1 = spv.FConvertOp %0 : f32 to f64
-    %3 = spv.FConvertOp %2 : vector<3xf32> to vector<3xf64>
+    %1 = spirv.FConvert %0 : f32 to f64
+    %3 = spirv.FConvert %2 : vector<3xf32> to vector<3xf64>
     ```
   }];
 }
@@ -282,15 +282,15 @@ def SPV_SConvertOp : SPV_CastOp<"SConvert",
     <!-- End of AutoGen section -->
 
     ```
-    s-convert-op ::= ssa-id `=` `spv.SConvertOp` ssa-use
+    s-convert-op ::= ssa-id `=` `spirv.SConvert` ssa-use
                      `:` operand-type `to` result-type
     ```
 
     #### Example:
 
     ```mlir
-    %1 = spv.SConvertOp %0 : i32 to i64
-    %3 = spv.SConvertOp %2 : vector<3xi32> to vector<3xi64>
+    %1 = spirv.SConvert %0 : i32 to i64
+    %3 = spirv.SConvert %2 : vector<3xi32> to vector<3xi64>
     ```
   }];
 }
@@ -318,15 +318,15 @@ def SPV_UConvertOp : SPV_CastOp<"UConvert",
     <!-- End of AutoGen section -->
 
     ```
-    u-convert-op ::= ssa-id `=` `spv.UConvertOp` ssa-use
+    u-convert-op ::= ssa-id `=` `spirv.UConvert` ssa-use
                  `:` operand-type `to` result-type
     ```
 
     #### Example:
 
     ```mlir
-    %1 = spv.UConvertOp %0 : i32 to i64
-    %3 = spv.UConvertOp %2 : vector<3xi32> to vector<3xi64>
+    %1 = spirv.UConvert %0 : i32 to i64
+    %3 = spirv.UConvert %2 : vector<3xi32> to vector<3xi64>
     ```
   }];
 }
@@ -348,8 +348,8 @@ def SPV_PtrCastToGenericOp : SPV_Op<"PtrCastToGeneric", [NoSideEffect]> {
     #### Example:
 
     ```mlir
-    %1 = spv.PtrCastToGenericOp %0 : !spv.ptr<f32, CrossWorkGroup> to 
-         !spv.ptr<f32, Generic>
+    %1 = spirv.PtrCastToGeneric %0 : !spirv.ptr<f32, CrossWorkGroup> to
+         !spirv.ptr<f32, Generic>
     ```
   }];
 
@@ -391,8 +391,8 @@ def SPV_GenericCastToPtrOp : SPV_Op<"GenericCastToPtr", [NoSideEffect]> {
     #### Example:
 
     ```mlir
-       %1 = spv.GenericCastToPtrOp %0 : !spv.ptr<f32, Generic> to
-       !spv.ptr<f32, CrossWorkGroup>
+       %1 = spirv.GenericCastToPtr %0 : !spirv.ptr<f32, Generic> to
+       !spirv.ptr<f32, CrossWorkGroup>
     ```
   }];
 
@@ -444,8 +444,8 @@ def SPV_GenericCastToPtrExplicitOp : SPV_Op<"GenericCastToPtrExplicit", [NoSideE
     #### Example:
 
     ```mlir
-       %1 = spv.GenericCastToPtrExplicitOp %0 : !spv.ptr<f32, Generic> to
-       !spv.ptr<f32, CrossWorkGroup>
+       %1 = spirv.GenericCastToPtrExplicit %0 : !spirv.ptr<f32, Generic> to
+       !spirv.ptr<f32, CrossWorkGroup>
     ```
   }];
 

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td
index 3b9b844f4d42..35c0c4810ae5 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCompositeOps.td
@@ -46,14 +46,14 @@ def SPV_CompositeConstructOp : SPV_Op<"CompositeConstruct", [NoSideEffect]> {
     <!-- End of AutoGen section -->
 
     ```
-    composite-construct-op ::= ssa-id `=` `spv.CompositeConstruct`
+    composite-construct-op ::= ssa-id `=` `spirv.CompositeConstruct`
                                (ssa-use (`,` ssa-use)* )? `:` composite-type
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.CompositeConstruct %1, %2, %3 : vector<3xf32>
+    %0 = spirv.CompositeConstruct %1, %2, %3 : vector<3xf32>
     ```
   }];
 
@@ -90,7 +90,7 @@ def SPV_CompositeExtractOp : SPV_Op<"CompositeExtract",
     <!-- End of AutoGen section -->
 
     ```
-    composite-extract-op ::= ssa-id `=` `spv.CompositeExtract` ssa-use
+    composite-extract-op ::= ssa-id `=` `spirv.CompositeExtract` ssa-use
                              `[` integer-literal (',' integer-literal)* `]`
                              `:` composite-type
     ```
@@ -98,9 +98,9 @@ def SPV_CompositeExtractOp : SPV_Op<"CompositeExtract",
     #### Example:
 
     ```mlir
-    %0 = spv.Variable : !spv.ptr<!spv.array<4x!spv.array<4xf32>>, Function>
-    %1 = spv.Load "Function" %0 ["Volatile"] : !spv.array<4x!spv.array<4xf32>>
-    %2 = spv.CompositeExtract %1[1 : i32] : !spv.array<4x!spv.array<4xf32>>
+    %0 = spirv.Variable : !spirv.ptr<!spirv.array<4x!spirv.array<4xf32>>, Function>
+    %1 = spirv.Load "Function" %0 ["Volatile"] : !spirv.array<4x!spirv.array<4xf32>>
+    %2 = spirv.CompositeExtract %1[1 : i32] : !spirv.array<4x!spirv.array<4xf32>>
     ```
 
   }];
@@ -145,7 +145,7 @@ def SPV_CompositeInsertOp : SPV_Op<"CompositeInsert",
     <!-- End of AutoGen section -->
 
     ```
-    composite-insert-op ::= ssa-id `=` `spv.CompositeInsert` ssa-use, ssa-use
+    composite-insert-op ::= ssa-id `=` `spirv.CompositeInsert` ssa-use, ssa-use
                             `[` integer-literal (',' integer-literal)* `]`
                             `:` object-type `into` composite-type
     ```
@@ -153,7 +153,7 @@ def SPV_CompositeInsertOp : SPV_Op<"CompositeInsert",
     #### Example:
 
     ```mlir
-    %0 = spv.CompositeInsert %object, %composite[1 : i32] : f32 into !spv.array<4xf32>
+    %0 = spirv.CompositeInsert %object, %composite[1 : i32] : f32 into !spirv.array<4xf32>
     ```
   }];
 
@@ -201,7 +201,7 @@ def SPV_VectorExtractDynamicOp : SPV_Op<"VectorExtractDynamic", [
     #### Example:
 
     ```
-    %2 = spv.VectorExtractDynamic %0[%1] : vector<8xf32>, i32
+    %2 = spirv.VectorExtractDynamic %0[%1] : vector<8xf32>, i32
     ```
   }];
 
@@ -254,7 +254,7 @@ def SPV_VectorInsertDynamicOp : SPV_Op<"VectorInsertDynamic", [
 
     ```
     scalar-type ::= integer-type | float-type | boolean-type
-    vector-insert-dynamic-op ::= `spv.VectorInsertDynamic ` ssa-use `,`
+    vector-insert-dynamic-op ::= `spirv.VectorInsertDynamic ` ssa-use `,`
                                   ssa-use `[` ssa-use `]`
                                   `:` `vector<` integer-literal `x` scalar-type `>` `,`
                                   integer-type
@@ -264,7 +264,7 @@ def SPV_VectorInsertDynamicOp : SPV_Op<"VectorInsertDynamic", [
 
     ```
     %scalar = ... : f32
-    %2 = spv.VectorInsertDynamic %scalar %0[%1] : f32, vector<8xf32>, i32
+    %2 = spirv.VectorInsertDynamic %scalar %0[%1] : f32, vector<8xf32>, i32
     ```
   }];
 
@@ -323,7 +323,7 @@ def SPV_VectorShuffleOp : SPV_Op<"VectorShuffle", [
     #### Example:
 
     ```mlir
-    %0 = spv.VectorShuffle [1: i32, 3: i32, 5: i32]
+    %0 = spirv.VectorShuffle [1: i32, 3: i32, 5: i32]
                            %vector1: vector<4xf32>, %vector2: vector<2xf32>
                         -> vector<3xf32>
     ```

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVControlFlowOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVControlFlowOps.td
index 17ac2ed430f2..4c4ffb290799 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVControlFlowOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVControlFlowOps.td
@@ -32,7 +32,7 @@ def SPV_BranchOp : SPV_Op<"Branch", [
     <!-- End of AutoGen section -->
 
     ```
-    branch-op ::= `spv.Branch` successor
+    branch-op ::= `spirv.Branch` successor
     successor ::= bb-id branch-use-list?
     branch-use-list ::= `(` ssa-use-list `:` type-list-no-parens `)`
     ```
@@ -40,8 +40,8 @@ def SPV_BranchOp : SPV_Op<"Branch", [
     #### Example:
 
     ```mlir
-    spv.Branch ^target
-    spv.Branch ^target(%0, %1: i32, f32)
+    spirv.Branch ^target
+    spirv.Branch ^target(%0, %1: i32, f32)
     ```
   }];
 
@@ -103,7 +103,7 @@ def SPV_BranchConditionalOp : SPV_Op<"BranchConditional", [
     <!-- End of AutoGen section -->
 
     ```
-    branch-conditional-op ::= `spv.BranchConditional` ssa-use
+    branch-conditional-op ::= `spirv.BranchConditional` ssa-use
                               (`[` integer-literal, integer-literal `]`)?
                               `,` successor `,` successor
     successor ::= bb-id branch-use-list?
@@ -113,8 +113,8 @@ def SPV_BranchConditionalOp : SPV_Op<"BranchConditional", [
     #### Example:
 
     ```mlir
-    spv.BranchConditional %condition, ^true_branch, ^false_branch
-    spv.BranchConditional %condition, ^true_branch(%0: i32), ^false_branch(%1: i32)
+    spirv.BranchConditional %condition, ^true_branch, ^false_branch
+    spirv.BranchConditional %condition, ^true_branch(%0: i32), ^false_branch(%1: i32)
     ```
   }];
 
@@ -215,15 +215,15 @@ def SPV_FunctionCallOp : SPV_Op<"FunctionCall", [
     <!-- End of AutoGen section -->
 
     ```
-    function-call-op ::= `spv.FunctionCall` function-id `(` ssa-use-list `)`
+    function-call-op ::= `spirv.FunctionCall` function-id `(` ssa-use-list `)`
                      `:` function-type
     ```
 
     #### Example:
 
     ```mlir
-    spv.FunctionCall @f_void(%arg0) : (i32) ->  ()
-    %0 = spv.FunctionCall @f_iadd(%arg0, %arg1) : (i32, i32) -> i32
+    spirv.FunctionCall @f_void(%arg0) : (i32) ->  ()
+    %0 = spirv.FunctionCall @f_iadd(%arg0, %arg1) : (i32, i32) -> i32
     ```
   }];
 
@@ -257,19 +257,19 @@ def SPV_LoopOp : SPV_Op<"mlir.loop", [InFunctionScope]> {
     and exited in structured ways. See "2.11. Structured Control Flow" of the
     SPIR-V spec for more details.
 
-    Instead of having a `spv.LoopMerge` op to directly model loop merge
+    Instead of having a `spirv.LoopMerge` op to directly model loop merge
     instruction for indicating the merge and continue target, we use regions
     to delimit the boundary of the loop: the merge target is the next op
-    following the `spv.mlir.loop` op and the continue target is the block that
-    has a back-edge pointing to the entry block inside the `spv.mlir.loop`'s region.
+    following the `spirv.mlir.loop` op and the continue target is the block that
+    has a back-edge pointing to the entry block inside the `spirv.mlir.loop`'s region.
     This way it's easier to discover all blocks belonging to a construct and
     it plays nicer with the MLIR system.
 
-    The `spv.mlir.loop` region should contain at least four blocks: one entry block,
+    The `spirv.mlir.loop` region should contain at least four blocks: one entry block,
     one loop header block, one loop continue block, one loop merge block.
     The entry block should be the first block and it should jump to the loop
     header block, which is the second block. The loop merge block should be the
-    last block. The merge block should only contain a `spv.mlir.merge` op.
+    last block. The merge block should only contain a `spirv.mlir.merge` op.
     The continue block should be the second to last block and it should have a
     branch to the loop header block. The loop continue block should be the only
     block, except the entry block, branching to the header block.
@@ -299,7 +299,7 @@ def SPV_LoopOp : SPV_Op<"mlir.loop", [InFunctionScope]> {
     Block *getMergeBlock();
 
     // Adds an empty entry block and loop merge block containing one
-    // spv.mlir.merge op.
+    // spirv.mlir.merge op.
     void addEntryAndMergeBlock();
   }];
 
@@ -317,10 +317,10 @@ def SPV_MergeOp : SPV_Op<"mlir.merge", [NoSideEffect, Terminator]> {
   let summary = "A special terminator for merging a structured selection/loop.";
 
   let description = [{
-    We use `spv.mlir.selection`/`spv.mlir.loop` for modelling structured selection/loop.
+    We use `spirv.mlir.selection`/`spirv.mlir.loop` for modelling structured selection/loop.
     This op is a terminator used inside their regions to mean jumping to the
-    merge point, which is the next op following the `spv.mlir.selection` or
-    `spv.mlir.loop` op. This op does not have a corresponding instruction in the
+    merge point, which is the next op following the `spirv.mlir.selection` or
+    `spirv.mlir.loop` op. This op does not have a corresponding instruction in the
     SPIR-V binary format; it's solely for structural purpose.
   }];
 
@@ -347,7 +347,7 @@ def SPV_ReturnOp : SPV_Op<"Return", [InFunctionScope, NoSideEffect,
     <!-- End of AutoGen section -->
 
     ```
-    return-op ::= `spv.Return`
+    return-op ::= `spirv.Return`
     ```
   }];
 
@@ -369,7 +369,7 @@ def SPV_UnreachableOp : SPV_Op<"Unreachable", [InFunctionScope, Terminator]> {
     <!-- End of AutoGen section -->
 
     ```
-    unreachable-op ::= `spv.Unreachable`
+    unreachable-op ::= `spirv.Unreachable`
     ```
   }];
 
@@ -396,13 +396,13 @@ def SPV_ReturnValueOp : SPV_Op<"ReturnValue", [InFunctionScope, NoSideEffect,
     <!-- End of AutoGen section -->
 
     ```
-    return-value-op ::= `spv.ReturnValue` ssa-use `:` spirv-type
+    return-value-op ::= `spirv.ReturnValue` ssa-use `:` spirv-type
     ```
 
     #### Example:
 
     ```mlir
-    spv.ReturnValue %0 : f32
+    spirv.ReturnValue %0 : f32
     ```
   }];
 
@@ -426,16 +426,16 @@ def SPV_SelectionOp : SPV_Op<"mlir.selection", [InFunctionScope]> {
     and exited in structured ways. See "2.11. Structured Control Flow" of the
     SPIR-V spec for more details.
 
-    Instead of having a `spv.SelectionMerge` op to directly model selection
+    Instead of having a `spirv.SelectionMerge` op to directly model selection
     merge instruction for indicating the merge target, we use regions to delimit
     the boundary of the selection: the merge target is the next op following the
-    `spv.mlir.selection` op. This way it's easier to discover all blocks belonging to
+    `spirv.mlir.selection` op. This way it's easier to discover all blocks belonging to
     the selection and it plays nicer with the MLIR system.
 
-    The `spv.mlir.selection` region should contain at least two blocks: one selection
+    The `spirv.mlir.selection` region should contain at least two blocks: one selection
     header block, and one selection merge. The selection header block should be
     the first block. The selection merge block should be the last block.
-    The merge block should only contain a `spv.mlir.merge` op.
+    The merge block should only contain a `spirv.mlir.merge` op.
   }];
 
   let arguments = (ins
@@ -453,12 +453,12 @@ def SPV_SelectionOp : SPV_Op<"mlir.selection", [InFunctionScope]> {
     /// Returns the selection merge block.
     Block *getMergeBlock();
 
-    /// Adds a selection merge block containing one spv.mlir.merge op.
+    /// Adds a selection merge block containing one spirv.mlir.merge op.
     void addMergeBlock();
 
-    /// Creates a spv.mlir.selection op for `if (<condition>) then { <thenBody> }`
+    /// Creates a spirv.mlir.selection op for `if (<condition>) then { <thenBody> }`
     /// with `builder`. `builder`'s insertion point will remain at after the
-    /// newly inserted spv.mlir.selection op afterwards.
+    /// newly inserted spirv.mlir.selection op afterwards.
     static SelectionOp createIfThen(
         Location loc, Value condition,
         function_ref<void(OpBuilder &builder)> thenBody,

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCooperativeMatrixOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCooperativeMatrixOps.td
index 9675c9df6011..9b31d424c9d2 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCooperativeMatrixOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVCooperativeMatrixOps.td
@@ -28,14 +28,14 @@ def SPV_NVCooperativeMatrixLengthOp : SPV_NvVendorOp<"CooperativeMatrixLength",
     Type is a cooperative matrix type.
 
     ``` {.ebnf}
-    cooperative-matrix-length-op ::= ssa-id `=` `spv.NV.CooperativeMatrixLength
+    cooperative-matrix-length-op ::= ssa-id `=` `spirv.NV.CooperativeMatrixLength
                                     ` : ` cooperative-matrix-type
     ```
 
     For example:
 
     ```
-    %0 = spv.NV.CooperativeMatrixLength : !spv.coopmatrix<Subgroup, i32, 8, 16>
+    %0 = spirv.NV.CooperativeMatrixLength : !spirv.coopmatrix<Subgroup, i32, 8, 16>
     ```
   }];
 
@@ -100,7 +100,7 @@ def SPV_NVCooperativeMatrixLoadOp : SPV_NvVendorOp<"CooperativeMatrixLoad", []>
     ### Custom assembly form
 
     ``` {.ebnf}
-    cooperative-matrixload-op ::= ssa-id `=` `spv.NV.CooperativeMatrixLoad`
+    cooperative-matrixload-op ::= ssa-id `=` `spirv.NV.CooperativeMatrixLoad`
                               ssa-use `,` ssa-use `,` ssa-use
                               (`[` memory-access `]`)? ` : `
                               pointer-type `as`
@@ -110,8 +110,8 @@ def SPV_NVCooperativeMatrixLoadOp : SPV_NvVendorOp<"CooperativeMatrixLoad", []>
     For example:
 
     ```
-    %0 = spv.NV.CooperativeMatrixLoad %ptr, %stride, %colMajor
-         : !spv.ptr<i32, StorageBuffer> as !spv.coopmatrix<i32, Workgroup, 16, 8>
+    %0 = spirv.NV.CooperativeMatrixLoad %ptr, %stride, %colMajor
+         : !spirv.ptr<i32, StorageBuffer> as !spirv.coopmatrix<i32, Workgroup, 16, 8>
     ```
   }];
 
@@ -172,7 +172,7 @@ def SPV_NVCooperativeMatrixMulAddOp : SPV_NvVendorOp<"CooperativeMatrixMulAdd",
     the scope of the operation).
 
     ``` {.ebnf}
-    cooperative-matrixmuladd-op ::= ssa-id `=` `spv.NV.CooperativeMatrixMulAdd`
+    cooperative-matrixmuladd-op ::= ssa-id `=` `spirv.NV.CooperativeMatrixMulAdd`
                               ssa-use `,` ssa-use `,` ssa-use ` : `
                               a-cooperative-matrix-type,
                               b-cooperative-matrix-type ->
@@ -181,8 +181,8 @@ def SPV_NVCooperativeMatrixMulAddOp : SPV_NvVendorOp<"CooperativeMatrixMulAdd",
     For example:
 
     ```
-    %0 = spv.NV.CooperativeMatrixMulAdd %arg0, %arg1, %arg2,  :
-      !spv.coopmatrix<Subgroup, i32, 8, 16>
+    %0 = spirv.NV.CooperativeMatrixMulAdd %arg0, %arg1, %arg2 :
+      !spirv.coopmatrix<Subgroup, i32, 8, 16>
     ```
   }];
 
@@ -236,7 +236,7 @@ def SPV_NVCooperativeMatrixStoreOp : SPV_NvVendorOp<"CooperativeMatrixStore", []
     same as specifying None.
 
     ``` {.ebnf}
-    coop-matrix-store-op ::= `spv.NV.CooperativeMatrixStore `
+    coop-matrix-store-op ::= `spirv.NV.CooperativeMatrixStore `
                               ssa-use `, ` ssa-use `, `
                               ssa-use `, ` ssa-use `, `
                               (`[` memory-access `]`)? `:`
@@ -246,8 +246,8 @@ def SPV_NVCooperativeMatrixStoreOp : SPV_NvVendorOp<"CooperativeMatrixStore", []
     For example:
 
     ```
-      spv.NV.CooperativeMatrixStore %arg0, %arg2, %arg1, %arg3 :
-        !spv.ptr<i32, StorageBuffer>, !spv.coopmatrix<Workgroup, i32, 16, 8>
+      spirv.NV.CooperativeMatrixStore %arg0, %arg2, %arg1, %arg3 :
+        !spirv.ptr<i32, StorageBuffer>, !spirv.coopmatrix<Workgroup, i32, 16, 8>
     ```
   }];
 

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGLOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGLOps.td
index 1243f225df82..5bfa08990591 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGLOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGLOps.td
@@ -120,14 +120,14 @@ def SPV_GLFAbsOp : SPV_GLUnaryArithmeticOp<"FAbs", 4, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    abs-op ::= ssa-id `=` `spv.GL.FAbs` ssa-use `:`
+    abs-op ::= ssa-id `=` `spirv.GL.FAbs` ssa-use `:`
                float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.FAbs %0 : f32
-    %3 = spv.GL.FAbs %1 : vector<3xf16>
+    %2 = spirv.GL.FAbs %0 : f32
+    %3 = spirv.GL.FAbs %1 : vector<3xf16>
     ```
   }];
 }
@@ -149,14 +149,14 @@ def SPV_GLSAbsOp : SPV_GLUnaryArithmeticOp<"SAbs", 5, SPV_Integer> {
     ```
     integer-scalar-vector-type ::= integer-type |
                                    `vector<` integer-literal `x` integer-type `>`
-    abs-op ::= ssa-id `=` `spv.GL.SAbs` ssa-use `:`
+    abs-op ::= ssa-id `=` `spirv.GL.SAbs` ssa-use `:`
                integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.SAbs %0 : i32
-    %3 = spv.GL.SAbs %1 : vector<3xi16>
+    %2 = spirv.GL.SAbs %0 : i32
+    %3 = spirv.GL.SAbs %1 : vector<3xi16>
     ```
   }];
 }
@@ -180,14 +180,14 @@ def SPV_GLCeilOp : SPV_GLUnaryArithmeticOp<"Ceil", 9, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    ceil-op ::= ssa-id `=` `spv.GL.Ceil` ssa-use `:`
+    ceil-op ::= ssa-id `=` `spirv.GL.Ceil` ssa-use `:`
                 float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.Ceil %0 : f32
-    %3 = spv.GL.Ceil %1 : vector<3xf16>
+    %2 = spirv.GL.Ceil %0 : f32
+    %3 = spirv.GL.Ceil %1 : vector<3xf16>
     ```
   }];
 }
@@ -212,14 +212,14 @@ def SPV_GLCosOp : SPV_GLUnaryArithmeticOp<"Cos", 14, SPV_Float16or32> {
     restricted-float-scalar-vector-type ::=
       restricted-float-scalar-type |
       `vector<` integer-literal `x` restricted-float-scalar-type `>`
-    cos-op ::= ssa-id `=` `spv.GL.Cos` ssa-use `:`
+    cos-op ::= ssa-id `=` `spirv.GL.Cos` ssa-use `:`
                restricted-float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.Cos %0 : f32
-    %3 = spv.GL.Cos %1 : vector<3xf16>
+    %2 = spirv.GL.Cos %0 : f32
+    %3 = spirv.GL.Cos %1 : vector<3xf16>
     ```
   }];
 }
@@ -244,14 +244,14 @@ def SPV_GLSinOp : SPV_GLUnaryArithmeticOp<"Sin", 13, SPV_Float16or32> {
     restricted-float-scalar-vector-type ::=
       restricted-float-scalar-type |
       `vector<` integer-literal `x` restricted-float-scalar-type `>`
-    sin-op ::= ssa-id `=` `spv.GL.Sin` ssa-use `:`
+    sin-op ::= ssa-id `=` `spirv.GL.Sin` ssa-use `:`
                restricted-float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.Sin %0 : f32
-    %3 = spv.GL.Sin %1 : vector<3xf16>
+    %2 = spirv.GL.Sin %0 : f32
+    %3 = spirv.GL.Sin %1 : vector<3xf16>
     ```
   }];
 }
@@ -276,14 +276,14 @@ def SPV_GLTanOp : SPV_GLUnaryArithmeticOp<"Tan", 15, SPV_Float16or32> {
     restricted-float-scalar-vector-type ::=
       restricted-float-scalar-type |
       `vector<` integer-literal `x` restricted-float-scalar-type `>`
-    tan-op ::= ssa-id `=` `spv.GL.Tan` ssa-use `:`
+    tan-op ::= ssa-id `=` `spirv.GL.Tan` ssa-use `:`
                restricted-float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.Tan %0 : f32
-    %3 = spv.GL.Tan %1 : vector<3xf16>
+    %2 = spirv.GL.Tan %0 : f32
+    %3 = spirv.GL.Tan %1 : vector<3xf16>
     ```
   }];
 }
@@ -310,14 +310,14 @@ def SPV_GLAsinOp : SPV_GLUnaryArithmeticOp<"Asin", 16, SPV_Float16or32> {
     restricted-float-scalar-vector-type ::=
       restricted-float-scalar-type |
       `vector<` integer-literal `x` restricted-float-scalar-type `>`
-    asin-op ::= ssa-id `=` `spv.GL.Asin` ssa-use `:`
+    asin-op ::= ssa-id `=` `spirv.GL.Asin` ssa-use `:`
                 restricted-float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.Asin %0 : f32
-    %3 = spv.GL.Asin %1 : vector<3xf16>
+    %2 = spirv.GL.Asin %0 : f32
+    %3 = spirv.GL.Asin %1 : vector<3xf16>
     ```
   }];
 }
@@ -344,14 +344,14 @@ def SPV_GLAcosOp : SPV_GLUnaryArithmeticOp<"Acos", 17, SPV_Float16or32> {
     restricted-float-scalar-vector-type ::=
       restricted-float-scalar-type |
       `vector<` integer-literal `x` restricted-float-scalar-type `>`
-    acos-op ::= ssa-id `=` `spv.GL.Acos` ssa-use `:`
+    acos-op ::= ssa-id `=` `spirv.GL.Acos` ssa-use `:`
                 restricted-float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.Acos %0 : f32
-    %3 = spv.GL.Acos %1 : vector<3xf16>
+    %2 = spirv.GL.Acos %0 : f32
+    %3 = spirv.GL.Acos %1 : vector<3xf16>
     ```
   }];
 }
@@ -378,14 +378,14 @@ def SPV_GLAtanOp : SPV_GLUnaryArithmeticOp<"Atan", 18, SPV_Float16or32> {
     restricted-float-scalar-vector-type ::=
       restricted-float-scalar-type |
       `vector<` integer-literal `x` restricted-float-scalar-type `>`
-    atan-op ::= ssa-id `=` `spv.GL.Atan` ssa-use `:`
+    atan-op ::= ssa-id `=` `spirv.GL.Atan` ssa-use `:`
                 restricted-float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.Atan %0 : f32
-    %3 = spv.GL.Atan %1 : vector<3xf16>
+    %2 = spirv.GL.Atan %0 : f32
+    %3 = spirv.GL.Atan %1 : vector<3xf16>
     ```
   }];
 }
@@ -410,14 +410,14 @@ def SPV_GLExpOp : SPV_GLUnaryArithmeticOp<"Exp", 27, SPV_Float16or32> {
     restricted-float-scalar-vector-type ::=
       restricted-float-scalar-type |
       `vector<` integer-literal `x` restricted-float-scalar-type `>`
-    exp-op ::= ssa-id `=` `spv.GL.Exp` ssa-use `:`
+    exp-op ::= ssa-id `=` `spirv.GL.Exp` ssa-use `:`
                restricted-float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.Exp %0 : f32
-    %3 = spv.GL.Exp %1 : vector<3xf16>
+    %2 = spirv.GL.Exp %0 : f32
+    %3 = spirv.GL.Exp %1 : vector<3xf16>
     ```
   }];
 }
@@ -441,14 +441,14 @@ def SPV_GLFloorOp : SPV_GLUnaryArithmeticOp<"Floor", 8, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    floor-op ::= ssa-id `=` `spv.GL.Floor` ssa-use `:`
+    floor-op ::= ssa-id `=` `spirv.GL.Floor` ssa-use `:`
                 float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.Floor %0 : f32
-    %3 = spv.GL.Floor %1 : vector<3xf16>
+    %2 = spirv.GL.Floor %0 : f32
+    %3 = spirv.GL.Floor %1 : vector<3xf16>
     ```
   }];
 }
@@ -471,14 +471,14 @@ def SPV_GLRoundOp: SPV_GLUnaryArithmeticOp<"Round", 1, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    floor-op ::= ssa-id `=` `spv.GL.Round` ssa-use `:`
+    round-op ::= ssa-id `=` `spirv.GL.Round` ssa-use `:`
                 float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.Round %0 : f32
-    %3 = spv.GL.Round %1 : vector<3xf16>
+    %2 = spirv.GL.Round %0 : f32
+    %3 = spirv.GL.Round %1 : vector<3xf16>
     ```
   }];
 }
@@ -501,14 +501,14 @@ def SPV_GLInverseSqrtOp : SPV_GLUnaryArithmeticOp<"InverseSqrt", 32, SPV_Float>
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    rsqrt-op ::= ssa-id `=` `spv.GL.InverseSqrt` ssa-use `:`
+    rsqrt-op ::= ssa-id `=` `spirv.GL.InverseSqrt` ssa-use `:`
                  float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.InverseSqrt %0 : f32
-    %3 = spv.GL.InverseSqrt %1 : vector<3xf16>
+    %2 = spirv.GL.InverseSqrt %0 : f32
+    %3 = spirv.GL.InverseSqrt %1 : vector<3xf16>
     ```
   }];
 }
@@ -534,14 +534,14 @@ def SPV_GLLogOp : SPV_GLUnaryArithmeticOp<"Log", 28, SPV_Float16or32> {
     restricted-float-scalar-vector-type ::=
       restricted-float-scalar-type |
       `vector<` integer-literal `x` restricted-float-scalar-type `>`
-    log-op ::= ssa-id `=` `spv.GL.Log` ssa-use `:`
+    log-op ::= ssa-id `=` `spirv.GL.Log` ssa-use `:`
                restricted-float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.Log %0 : f32
-    %3 = spv.GL.Log %1 : vector<3xf16>
+    %2 = spirv.GL.Log %0 : f32
+    %3 = spirv.GL.Log %1 : vector<3xf16>
     ```
   }];
 }
@@ -565,14 +565,14 @@ def SPV_GLFMaxOp : SPV_GLBinaryArithmeticOp<"FMax", 40, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    fmax-op ::= ssa-id `=` `spv.GL.FMax` ssa-use `:`
+    fmax-op ::= ssa-id `=` `spirv.GL.FMax` ssa-use `:`
                 float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.FMax %0, %1 : f32
-    %3 = spv.GL.FMax %0, %1 : vector<3xf16>
+    %2 = spirv.GL.FMax %0, %1 : f32
+    %3 = spirv.GL.FMax %0, %1 : vector<3xf16>
     ```
   }];
 }
@@ -595,14 +595,14 @@ def SPV_GLUMaxOp : SPV_GLBinaryArithmeticOp<"UMax", 41, SPV_Integer> {
     ```
     integer-scalar-vector-type ::= integer-type |
                                    `vector<` integer-literal `x` integer-type `>`
-    smax-op ::= ssa-id `=` `spv.GL.UMax` ssa-use `:`
+    umax-op ::= ssa-id `=` `spirv.GL.UMax` ssa-use `:`
                 integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.UMax %0, %1 : i32
-    %3 = spv.GL.UMax %0, %1 : vector<3xi16>
+    %2 = spirv.GL.UMax %0, %1 : i32
+    %3 = spirv.GL.UMax %0, %1 : vector<3xi16>
     ```
   }];
 }
@@ -625,14 +625,14 @@ def SPV_GLSMaxOp : SPV_GLBinaryArithmeticOp<"SMax", 42, SPV_Integer> {
     ```
     integer-scalar-vector-type ::= integer-type |
                                    `vector<` integer-literal `x` integer-type `>`
-    smax-op ::= ssa-id `=` `spv.GL.SMax` ssa-use `:`
+    smax-op ::= ssa-id `=` `spirv.GL.SMax` ssa-use `:`
                 integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.SMax %0, %1 : i32
-    %3 = spv.GL.SMax %0, %1 : vector<3xi16>
+    %2 = spirv.GL.SMax %0, %1 : i32
+    %3 = spirv.GL.SMax %0, %1 : vector<3xi16>
     ```
   }];
 }
@@ -656,14 +656,14 @@ def SPV_GLFMinOp : SPV_GLBinaryArithmeticOp<"FMin", 37, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    fmin-op ::= ssa-id `=` `spv.GL.FMin` ssa-use `:`
+    fmin-op ::= ssa-id `=` `spirv.GL.FMin` ssa-use `:`
                 float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.FMin %0, %1 : f32
-    %3 = spv.GL.FMin %0, %1 : vector<3xf16>
+    %2 = spirv.GL.FMin %0, %1 : f32
+    %3 = spirv.GL.FMin %0, %1 : vector<3xf16>
     ```
   }];
 }
@@ -686,14 +686,14 @@ def SPV_GLUMinOp : SPV_GLBinaryArithmeticOp<"UMin", 38, SPV_Integer> {
     ```
     integer-scalar-vector-type ::= integer-type |
                                    `vector<` integer-literal `x` integer-type `>`
-    smin-op ::= ssa-id `=` `spv.GL.UMin` ssa-use `:`
+    umin-op ::= ssa-id `=` `spirv.GL.UMin` ssa-use `:`
                 integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.UMin %0, %1 : i32
-    %3 = spv.GL.UMin %0, %1 : vector<3xi16>
+    %2 = spirv.GL.UMin %0, %1 : i32
+    %3 = spirv.GL.UMin %0, %1 : vector<3xi16>
     ```
   }];
 }
@@ -716,14 +716,14 @@ def SPV_GLSMinOp : SPV_GLBinaryArithmeticOp<"SMin", 39, SPV_Integer> {
     ```
     integer-scalar-vector-type ::= integer-type |
                                    `vector<` integer-literal `x` integer-type `>`
-    smin-op ::= ssa-id `=` `spv.GL.SMin` ssa-use `:`
+    smin-op ::= ssa-id `=` `spirv.GL.SMin` ssa-use `:`
                 integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.SMin %0, %1 : i32
-    %3 = spv.GL.SMin %0, %1 : vector<3xi16>
+    %2 = spirv.GL.SMin %0, %1 : i32
+    %3 = spirv.GL.SMin %0, %1 : vector<3xi16>
     ```
   }];
 }
@@ -750,14 +750,14 @@ def SPV_GLPowOp : SPV_GLBinaryArithmeticOp<"Pow", 26, SPV_Float16or32> {
     restricted-float-scalar-vector-type ::=
       restricted-float-scalar-type |
       `vector<` integer-literal `x` restricted-float-scalar-type `>`
-    pow-op ::= ssa-id `=` `spv.GL.Pow` ssa-use `:`
+    pow-op ::= ssa-id `=` `spirv.GL.Pow` ssa-use `:`
                restricted-float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.Pow %0, %1 : f32
-    %3 = spv.GL.Pow %0, %1 : vector<3xf16>
+    %2 = spirv.GL.Pow %0, %1 : f32
+    %3 = spirv.GL.Pow %0, %1 : vector<3xf16>
     ```
   }];
 }
@@ -780,14 +780,14 @@ def SPV_GLFSignOp : SPV_GLUnaryArithmeticOp<"FSign", 6, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    sign-op ::= ssa-id `=` `spv.GL.FSign` ssa-use `:`
+    sign-op ::= ssa-id `=` `spirv.GL.FSign` ssa-use `:`
                 float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.FSign %0 : f32
-    %3 = spv.GL.FSign %1 : vector<3xf16>
+    %2 = spirv.GL.FSign %0 : f32
+    %3 = spirv.GL.FSign %1 : vector<3xf16>
     ```
   }];
 }
@@ -809,14 +809,14 @@ def SPV_GLSSignOp : SPV_GLUnaryArithmeticOp<"SSign", 7, SPV_Integer> {
     ```
     integer-scalar-vector-type ::= integer-type |
                                    `vector<` integer-literal `x` integer-type `>`
-    sign-op ::= ssa-id `=` `spv.GL.SSign` ssa-use `:`
+    sign-op ::= ssa-id `=` `spirv.GL.SSign` ssa-use `:`
                 integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.SSign %0 : i32
-    %3 = spv.GL.SSign %1 : vector<3xi16>
+    %2 = spirv.GL.SSign %0 : i32
+    %3 = spirv.GL.SSign %1 : vector<3xi16>
     ```
   }];
 }
@@ -839,14 +839,14 @@ def SPV_GLSqrtOp : SPV_GLUnaryArithmeticOp<"Sqrt", 31, SPV_Float> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    sqrt-op ::= ssa-id `=` `spv.GL.Sqrt` ssa-use `:`
+    sqrt-op ::= ssa-id `=` `spirv.GL.Sqrt` ssa-use `:`
                 float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.Sqrt %0 : f32
-    %3 = spv.GL.Sqrt %1 : vector<3xf16>
+    %2 = spirv.GL.Sqrt %0 : f32
+    %3 = spirv.GL.Sqrt %1 : vector<3xf16>
     ```
   }];
 }
@@ -871,14 +871,14 @@ def SPV_GLSinhOp : SPV_GLUnaryArithmeticOp<"Sinh", 19, SPV_Float16or32> {
     restricted-float-scalar-vector-type ::=
       restricted-float-scalar-type |
       `vector<` integer-literal `x` restricted-float-scalar-type `>`
-    sinh-op ::= ssa-id `=` `spv.GL.Sinh` ssa-use `:`
+    sinh-op ::= ssa-id `=` `spirv.GL.Sinh` ssa-use `:`
                 restricted-float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.Sinh %0 : f32
-    %3 = spv.GL.Sinh %1 : vector<3xf16>
+    %2 = spirv.GL.Sinh %0 : f32
+    %3 = spirv.GL.Sinh %1 : vector<3xf16>
     ```
   }];
 }
@@ -903,14 +903,14 @@ def SPV_GLCoshOp : SPV_GLUnaryArithmeticOp<"Cosh", 20, SPV_Float16or32> {
     restricted-float-scalar-vector-type ::=
       restricted-float-scalar-type |
       `vector<` integer-literal `x` restricted-float-scalar-type `>`
-    cosh-op ::= ssa-id `=` `spv.GL.Cosh` ssa-use `:`
+    cosh-op ::= ssa-id `=` `spirv.GL.Cosh` ssa-use `:`
                 restricted-float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.Cosh %0 : f32
-    %3 = spv.GL.Cosh %1 : vector<3xf16>
+    %2 = spirv.GL.Cosh %0 : f32
+    %3 = spirv.GL.Cosh %1 : vector<3xf16>
     ```
   }];
 }
@@ -935,14 +935,14 @@ def SPV_GLTanhOp : SPV_GLUnaryArithmeticOp<"Tanh", 21, SPV_Float16or32> {
     restricted-float-scalar-vector-type ::=
       restricted-float-scalar-type |
       `vector<` integer-literal `x` restricted-float-scalar-type `>`
-    tanh-op ::= ssa-id `=` `spv.GL.Tanh` ssa-use `:`
+    tanh-op ::= ssa-id `=` `spirv.GL.Tanh` ssa-use `:`
                 restricted-float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.Tanh %0 : f32
-    %3 = spv.GL.Tanh %1 : vector<3xf16>
+    %2 = spirv.GL.Tanh %0 : f32
+    %3 = spirv.GL.Tanh %1 : vector<3xf16>
     ```
   }];
 }
@@ -965,14 +965,14 @@ def SPV_GLFClampOp : SPV_GLTernaryArithmeticOp<"FClamp", 43, SPV_Float> {
 
     <!-- End of AutoGen section -->
     ```
-    fclamp-op ::= ssa-id `=` `spv.GL.FClamp` ssa-use, ssa-use, ssa-use `:`
+    fclamp-op ::= ssa-id `=` `spirv.GL.FClamp` ssa-use, ssa-use, ssa-use `:`
                float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.FClamp %x, %min, %max : f32
-    %3 = spv.GL.FClamp %x, %min, %max : vector<3xf16>
+    %2 = spirv.GL.FClamp %x, %min, %max : f32
+    %3 = spirv.GL.FClamp %x, %min, %max : vector<3xf16>
     ```
   }];
 }
@@ -994,14 +994,14 @@ def SPV_GLUClampOp : SPV_GLTernaryArithmeticOp<"UClamp", 44, SPV_Integer> {
 
     <!-- End of AutoGen section -->
     ```
-    uclamp-op ::= ssa-id `=` `spv.GL.UClamp` ssa-use, ssa-use, ssa-use `:`
+    uclamp-op ::= ssa-id `=` `spirv.GL.UClamp` ssa-use, ssa-use, ssa-use `:`
                unsigned-signless-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.UClamp %x, %min, %max : i32
-    %3 = spv.GL.UClamp %x, %min, %max : vector<3xui16>
+    %2 = spirv.GL.UClamp %x, %min, %max : i32
+    %3 = spirv.GL.UClamp %x, %min, %max : vector<3xui16>
     ```
   }];
 }
@@ -1023,14 +1023,14 @@ def SPV_GLSClampOp : SPV_GLTernaryArithmeticOp<"SClamp", 45, SPV_Integer> {
 
     <!-- End of AutoGen section -->
     ```
-    uclamp-op ::= ssa-id `=` `spv.GL.UClamp` ssa-use, ssa-use, ssa-use `:`
+    sclamp-op ::= ssa-id `=` `spirv.GL.SClamp` ssa-use, ssa-use, ssa-use `:`
               signed-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.SClamp %x, %min, %max : si32
-    %3 = spv.GL.SClamp %x, %min, %max : vector<3xsi16>
+    %2 = spirv.GL.SClamp %x, %min, %max : si32
+    %3 = spirv.GL.SClamp %x, %min, %max : vector<3xsi16>
     ```
   }];
 }
@@ -1063,14 +1063,14 @@ def SPV_GLFmaOp : SPV_GLTernaryArithmeticOp<"Fma", 50, SPV_Float> {
 
     <!-- End of AutoGen section -->
     ```
-    fma-op ::= ssa-id `=` `spv.GL.Fma` ssa-use, ssa-use, ssa-use `:`
+    fma-op ::= ssa-id `=` `spirv.GL.Fma` ssa-use, ssa-use, ssa-use `:`
                float-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %0 = spv.GL.Fma %a, %b, %c : f32
-    %1 = spv.GL.Fma %a, %b, %c : vector<3xf16>
+    %0 = spirv.GL.Fma %a, %b, %c : f32
+    %1 = spirv.GL.Fma %a, %b, %c : vector<3xf16>
     ```
   }];
 }
@@ -1105,15 +1105,15 @@ def SPV_GLFrexpStructOp : SPV_GLOp<"FrexpStruct", 52, [NoSideEffect]> {
                                  `vector<` integer-literal `x` float-type `>`
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    frexpstruct-op ::= ssa-id `=` `spv.GL.FrexpStruct` ssa-use `:`
-                                  `!spv.struct<` float-scalar-vector-type `,`
+    frexpstruct-op ::= ssa-id `=` `spirv.GL.FrexpStruct` ssa-use `:`
+                                  `!spirv.struct<` float-scalar-vector-type `,`
                                                   integer-scalar-vector-type `>`
     ```
     #### Example:
 
     ```mlir
-    %2 = spv.GL.FrexpStruct %0 : f32 -> !spv.struct<f32, i32>
-    %3 = spv.GL.FrexpStruct %0 : vector<3xf32> -> !spv.struct<vector<3xf32>, vector<3xi32>>
+    %2 = spirv.GL.FrexpStruct %0 : f32 -> !spirv.struct<f32, i32>
+    %3 = spirv.GL.FrexpStruct %0 : vector<3xf32> -> !spirv.struct<vector<3xf32>, vector<3xi32>>
     ```
   }];
 
@@ -1163,8 +1163,8 @@ def SPV_GLLdexpOp :
     #### Example:
 
     ```mlir
-    %y = spv.GL.Ldexp %x : f32, %exp : i32 -> f32
-    %y = spv.GL.Ldexp %x : vector<3xf32>, %exp : vector<3xi32> -> vector<3xf32>
+    %y = spirv.GL.Ldexp %x : f32, %exp : i32 -> f32
+    %y = spirv.GL.Ldexp %x : vector<3xf32>, %exp : vector<3xi32> -> vector<3xf32>
     ```
   }];
 
@@ -1199,8 +1199,8 @@ def SPV_GLFMixOp :
     #### Example:
 
     ```mlir
-    %0 = spv.GL.FMix %x : f32, %y : f32, %a : f32 -> f32
-    %0 = spv.GL.FMix %x : vector<4xf32>, %y : vector<4xf32>, %a : vector<4xf32> -> vector<4xf32>
+    %0 = spirv.GL.FMix %x : f32, %y : f32, %a : f32 -> f32
+    %0 = spirv.GL.FMix %x : vector<4xf32>, %y : vector<4xf32>, %a : vector<4xf32> -> vector<4xf32>
     ```
   }];
 

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td
index 40d91aa0038b..d344948c4ea1 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVGroupOps.td
@@ -51,7 +51,7 @@ def SPV_GroupBroadcastOp : SPV_Op<"GroupBroadcast",
                                `vector<` integer-literal `x` float-type `>`
     localid-type ::= integer-type |
                    `vector<` integer-literal `x` integer-type `>`
-    group-broadcast-op ::= ssa-id `=` `spv.GroupBroadcast` scope ssa_use,
+    group-broadcast-op ::= ssa-id `=` `spirv.GroupBroadcast` scope ssa_use,
                    ssa_use `:` integer-float-scalar-vector-type `,` localid-type
     ```mlir
 
@@ -62,8 +62,8 @@ def SPV_GroupBroadcastOp : SPV_Op<"GroupBroadcast",
     %vector_value = ... : vector<4xf32>
     %scalar_localid = ... : i32
     %vector_localid = ... : vector<3xi32>
-    %0 = spv.GroupBroadcast "Subgroup" %scalar_value, %scalar_localid : f32, i32
-    %1 = spv.GroupBroadcast "Workgroup" %vector_value, %vector_localid :
+    %0 = spirv.GroupBroadcast "Subgroup" %scalar_value, %scalar_localid : f32, i32
+    %1 = spirv.GroupBroadcast "Workgroup" %vector_value, %vector_localid :
       vector<4xf32>, vector<3xi32>
     ```
   }];
@@ -113,14 +113,14 @@ def SPV_KHRSubgroupBallotOp : SPV_KhrVendorOp<"SubgroupBallot", []> {
     <!-- End of AutoGen section -->
 
     ```
-    subgroup-ballot-op ::= ssa-id `=` `spv.KHR.SubgroupBallot`
+    subgroup-ballot-op ::= ssa-id `=` `spirv.KHR.SubgroupBallot`
                                 ssa-use `:` `vector` `<` 4 `x` `i32` `>`
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.KHR.SubgroupBallot %predicate : vector<4xi32>
+    %0 = spirv.KHR.SubgroupBallot %predicate : vector<4xi32>
     ```
   }];
 
@@ -168,14 +168,14 @@ def SPV_INTELSubgroupBlockReadOp : SPV_IntelVendorOp<"SubgroupBlockRead", []> {
     <!-- End of AutoGen section -->
 
     ```
-    subgroup-block-read-INTEL-op ::= ssa-id `=` `spv.INTEL.SubgroupBlockRead`
+    subgroup-block-read-INTEL-op ::= ssa-id `=` `spirv.INTEL.SubgroupBlockRead`
                                 storage-class ssa_use `:` spirv-element-type
     ```mlir
 
     #### Example:
 
     ```
-    %0 = spv.INTEL.SubgroupBlockRead "StorageBuffer" %ptr : i32
+    %0 = spirv.INTEL.SubgroupBlockRead "StorageBuffer" %ptr : i32
     ```
   }];
 
@@ -218,14 +218,14 @@ def SPV_INTELSubgroupBlockWriteOp : SPV_IntelVendorOp<"SubgroupBlockWrite", []>
     <!-- End of AutoGen section -->
 
     ```
-    subgroup-block-write-INTEL-op ::= ssa-id `=` `spv.INTEL.SubgroupBlockWrite`
+    subgroup-block-write-INTEL-op ::= ssa-id `=` `spirv.INTEL.SubgroupBlockWrite`
                       storage-class ssa_use `,` ssa-use `:` spirv-element-type
     ```mlir
 
     #### Example:
 
     ```
-    spv.INTEL.SubgroupBlockWrite "StorageBuffer" %ptr, %value : i32
+    spirv.INTEL.SubgroupBlockWrite "StorageBuffer" %ptr, %value : i32
     ```
   }];
 

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVImageOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVImageOps.td
index 1d5e20fa98b8..a359e6b18b31 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVImageOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVImageOps.td
@@ -52,8 +52,8 @@ def SPV_ImageDrefGatherOp : SPV_Op<"ImageDrefGather", [NoSideEffect]> {
     ```
 
     ```mlir
-    %0 = spv.ImageDrefGather %1 : !spv.sampled_image<!spv.image<i32, Dim2D, NoDepth, NonArrayed, SingleSampled, NoSampler, Unknown>>, %2 : vector<4xf32>, %3 : f32 -> vector<4xi32>
-    %0 = spv.ImageDrefGather %1 : !spv.sampled_image<!spv.image<i32, Dim2D, NoDepth, NonArrayed, SingleSampled, NoSampler, Unknown>>, %2 : vector<4xf32>, %3 : f32 ["NonPrivateTexel"] : f32, f32 -> vector<4xi32>
+    %0 = spirv.ImageDrefGather %1 : !spirv.sampled_image<!spirv.image<i32, Dim2D, NoDepth, NonArrayed, SingleSampled, NoSampler, Unknown>>, %2 : vector<4xf32>, %3 : f32 -> vector<4xi32>
+    %0 = spirv.ImageDrefGather %1 : !spirv.sampled_image<!spirv.image<i32, Dim2D, NoDepth, NonArrayed, SingleSampled, NoSampler, Unknown>>, %2 : vector<4xf32>, %3 : f32 ["NonPrivateTexel"] : f32, f32 -> vector<4xi32>
     ```
   }];
 
@@ -116,9 +116,9 @@ def SPV_ImageQuerySizeOp : SPV_Op<"ImageQuerySize", [NoSideEffect]> {
     #### Example:
 
     ```mlir
-    %3 = spv.ImageQuerySize %0 : !spv.image<i32, Dim1D, NoDepth, NonArrayed, SingleSampled, NoSampler, Unknown> -> i32
-    %4 = spv.ImageQuerySize %1 : !spv.image<i32, Dim2D, NoDepth, NonArrayed, SingleSampled, NoSampler, Unknown> -> vector<2xi32>
-    %5 = spv.ImageQuerySize %2 : !spv.image<i32, Dim2D, NoDepth, Arrayed, SingleSampled, NoSampler, Unknown> -> vector<3xi32>
+    %3 = spirv.ImageQuerySize %0 : !spirv.image<i32, Dim1D, NoDepth, NonArrayed, SingleSampled, NoSampler, Unknown> -> i32
+    %4 = spirv.ImageQuerySize %1 : !spirv.image<i32, Dim2D, NoDepth, NonArrayed, SingleSampled, NoSampler, Unknown> -> vector<2xi32>
+    %5 = spirv.ImageQuerySize %2 : !spirv.image<i32, Dim2D, NoDepth, Arrayed, SingleSampled, NoSampler, Unknown> -> vector<3xi32>
     ```
 
   }];
@@ -161,7 +161,7 @@ def SPV_ImageOp : SPV_Op<"Image",
     #### Example:
 
     ```mlir
-    %0 = spv.Image %1 : !spv.sampled_image<!spv.image<f32, Cube, NoDepth, NonArrayed, SingleSampled, NoSampler, Unknown>>
+    %0 = spirv.Image %1 : !spirv.sampled_image<!spirv.image<f32, Cube, NoDepth, NonArrayed, SingleSampled, NoSampler, Unknown>>
     ```
   }];
 

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVJointMatrixOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVJointMatrixOps.td
index 0d8280d5ba01..30dc403fca17 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVJointMatrixOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVJointMatrixOps.td
@@ -28,14 +28,14 @@ def SPV_INTELJointMatrixWorkItemLengthOp : SPV_IntelVendorOp<"JointMatrixWorkIte
     Type is a joint matrix type.
 
     ``` {.ebnf}
-    joint-matrix-length-op ::= ssa-id `=` `spv.INTEL.JointMatrixWorkItemLength
+    joint-matrix-length-op ::= ssa-id `=` `spirv.INTEL.JointMatrixWorkItemLength
                                     ` : ` joint-matrix-type
     ```
 
     For example:
 
     ```
-    %0 = spv.INTEL.JointMatrixWorkItemLength : !spv.jointmatrix<Subgroup, i32, 8, 16>
+    %0 = spirv.INTEL.JointMatrixWorkItemLength : !spirv.jointmatrix<Subgroup, i32, 8, 16>
     ```
   }];
 
@@ -85,10 +85,10 @@ def SPV_INTELJointMatrixLoadOp : SPV_IntelVendorOp<"JointMatrixLoad", []> {
 
     #### Example:
     ```mlir
-    %0 = spv.INTEL.JointMatrixLoad <Subgroup> <RowMajor> %ptr, %stride
-         {memory_access = #spv.memory_access<Volatile>} :
-         (!spv.ptr<i32, CrossWorkgroup>, i32) ->
-         !spv.jointmatrix<8x16xi32, ColumnMajor, Subgroup>
+    %0 = spirv.INTEL.JointMatrixLoad <Subgroup> <RowMajor> %ptr, %stride
+         {memory_access = #spirv.memory_access<Volatile>} :
+         (!spirv.ptr<i32, CrossWorkgroup>, i32) ->
+         !spirv.jointmatrix<8x16xi32, ColumnMajor, Subgroup>
     ```
   }];
 
@@ -149,10 +149,10 @@ def SPV_INTELJointMatrixMadOp : SPV_IntelVendorOp<"JointMatrixMad",
 
     #### Example:
     ```mlir
-    %r = spv.INTEL.JointMatrixMad <Subgroup> %a, %b, %c :
-         !spv.jointmatrix<8x32xi8, RowMajor, Subgroup>,
-         !spv.jointmatrix<32x8xi8, ColumnMajor, Subgroup>
-         -> !spv.jointmatrix<8x8xi32,  RowMajor, Subgroup>
+    %r = spirv.INTEL.JointMatrixMad <Subgroup> %a, %b, %c :
+         !spirv.jointmatrix<8x32xi8, RowMajor, Subgroup>,
+         !spirv.jointmatrix<32x8xi8, ColumnMajor, Subgroup>
+         -> !spirv.jointmatrix<8x8xi32,  RowMajor, Subgroup>
     ```
 
   }];
@@ -212,9 +212,9 @@ def SPV_INTELJointMatrixStoreOp : SPV_IntelVendorOp<"JointMatrixStore", []> {
 
     #### Example:
     ```mlir
-    spv.INTEL.JointMatrixStore <Subgroup> <ColumnMajor> %ptr, %m, %stride
-    {memory_access = #spv.memory_access<Volatile>} : (!spv.ptr<i32, Workgroup>,
-    !spv.jointmatrix<8x16xi32, RowMajor, Subgroup>, i32)
+    spirv.INTEL.JointMatrixStore <Subgroup> <ColumnMajor> %ptr, %m, %stride
+    {memory_access = #spirv.memory_access<Volatile>} : (!spirv.ptr<i32, Workgroup>,
+    !spirv.jointmatrix<8x16xi32, RowMajor, Subgroup>, i32)
     ```
 
   }];

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td
index 8123f028f497..86ab90c33c53 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVLogicalOps.td
@@ -75,14 +75,14 @@ def SPV_FOrdEqualOp : SPV_LogicalBinaryOp<"FOrdEqual", SPV_Float, [Commutative]>
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    fordequal-op ::= ssa-id `=` `spv.FOrdEqual` ssa-use, ssa-use
+    fordequal-op ::= ssa-id `=` `spirv.FOrdEqual` ssa-use, ssa-use
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.FOrdEqual %0, %1 : f32
-    %5 = spv.FOrdEqual %2, %3 : vector<4xf32>
+    %4 = spirv.FOrdEqual %0, %1 : f32
+    %5 = spirv.FOrdEqual %2, %3 : vector<4xf32>
     ```
   }];
 }
@@ -109,14 +109,14 @@ def SPV_FOrdGreaterThanOp : SPV_LogicalBinaryOp<"FOrdGreaterThan", SPV_Float, []
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    fordgt-op ::= ssa-id `=` `spv.FOrdGreaterThan` ssa-use, ssa-use
+    fordgt-op ::= ssa-id `=` `spirv.FOrdGreaterThan` ssa-use, ssa-use
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.FOrdGreaterThan %0, %1 : f32
-    %5 = spv.FOrdGreaterThan %2, %3 : vector<4xf32>
+    %4 = spirv.FOrdGreaterThan %0, %1 : f32
+    %5 = spirv.FOrdGreaterThan %2, %3 : vector<4xf32>
     ```
   }];
 }
@@ -143,14 +143,14 @@ def SPV_FOrdGreaterThanEqualOp : SPV_LogicalBinaryOp<"FOrdGreaterThanEqual", SPV
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    fordgte-op ::= ssa-id `=` `spv.FOrdGreaterThanEqual` ssa-use, ssa-use
+    fordgte-op ::= ssa-id `=` `spirv.FOrdGreaterThanEqual` ssa-use, ssa-use
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.FOrdGreaterThanEqual %0, %1 : f32
-    %5 = spv.FOrdGreaterThanEqual %2, %3 : vector<4xf32>
+    %4 = spirv.FOrdGreaterThanEqual %0, %1 : f32
+    %5 = spirv.FOrdGreaterThanEqual %2, %3 : vector<4xf32>
     ```
   }];
 }
@@ -177,14 +177,14 @@ def SPV_FOrdLessThanOp : SPV_LogicalBinaryOp<"FOrdLessThan", SPV_Float, []> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    fordlt-op ::= ssa-id `=` `spv.FOrdLessThan` ssa-use, ssa-use
+    fordlt-op ::= ssa-id `=` `spirv.FOrdLessThan` ssa-use, ssa-use
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.FOrdLessThan %0, %1 : f32
-    %5 = spv.FOrdLessThan %2, %3 : vector<4xf32>
+    %4 = spirv.FOrdLessThan %0, %1 : f32
+    %5 = spirv.FOrdLessThan %2, %3 : vector<4xf32>
     ```
   }];
 }
@@ -211,14 +211,14 @@ def SPV_FOrdLessThanEqualOp : SPV_LogicalBinaryOp<"FOrdLessThanEqual", SPV_Float
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    fordlte-op ::= ssa-id `=` `spv.FOrdLessThanEqual` ssa-use, ssa-use
+    fordlte-op ::= ssa-id `=` `spirv.FOrdLessThanEqual` ssa-use, ssa-use
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.FOrdLessThanEqual %0, %1 : f32
-    %5 = spv.FOrdLessThanEqual %2, %3 : vector<4xf32>
+    %4 = spirv.FOrdLessThanEqual %0, %1 : f32
+    %5 = spirv.FOrdLessThanEqual %2, %3 : vector<4xf32>
     ```
   }];
 }
@@ -242,14 +242,14 @@ def SPV_FOrdNotEqualOp : SPV_LogicalBinaryOp<"FOrdNotEqual", SPV_Float, [Commuta
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    fordneq-op ::= ssa-id `=` `spv.FOrdNotEqual` ssa-use, ssa-use
+    fordneq-op ::= ssa-id `=` `spirv.FOrdNotEqual` ssa-use, ssa-use
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.FOrdNotEqual %0, %1 : f32
-    %5 = spv.FOrdNotEqual %2, %3 : vector<4xf32>
+    %4 = spirv.FOrdNotEqual %0, %1 : f32
+    %5 = spirv.FOrdNotEqual %2, %3 : vector<4xf32>
     ```
   }];
 }
@@ -273,14 +273,14 @@ def SPV_FUnordEqualOp : SPV_LogicalBinaryOp<"FUnordEqual", SPV_Float, [Commutati
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    funordequal-op ::= ssa-id `=` `spv.FUnordEqual` ssa-use, ssa-use
+    funordequal-op ::= ssa-id `=` `spirv.FUnordEqual` ssa-use, ssa-use
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.FUnordEqual %0, %1 : f32
-    %5 = spv.FUnordEqual %2, %3 : vector<4xf32>
+    %4 = spirv.FUnordEqual %0, %1 : f32
+    %5 = spirv.FUnordEqual %2, %3 : vector<4xf32>
     ```
   }];
 }
@@ -307,14 +307,14 @@ def SPV_FUnordGreaterThanOp : SPV_LogicalBinaryOp<"FUnordGreaterThan", SPV_Float
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    funordgt-op ::= ssa-id `=` `spv.FUnordGreaterThan` ssa-use, ssa-use
+    funordgt-op ::= ssa-id `=` `spirv.FUnordGreaterThan` ssa-use, ssa-use
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.FUnordGreaterThan %0, %1 : f32
-    %5 = spv.FUnordGreaterThan %2, %3 : vector<4xf32>
+    %4 = spirv.FUnordGreaterThan %0, %1 : f32
+    %5 = spirv.FUnordGreaterThan %2, %3 : vector<4xf32>
     ```
   }];
 }
@@ -341,14 +341,14 @@ def SPV_FUnordGreaterThanEqualOp : SPV_LogicalBinaryOp<"FUnordGreaterThanEqual",
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    funordgte-op ::= ssa-id `=` `spv.FUnordGreaterThanEqual` ssa-use, ssa-use
+    funordgte-op ::= ssa-id `=` `spirv.FUnordGreaterThanEqual` ssa-use, ssa-use
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.FUnordGreaterThanEqual %0, %1 : f32
-    %5 = spv.FUnordGreaterThanEqual %2, %3 : vector<4xf32>
+    %4 = spirv.FUnordGreaterThanEqual %0, %1 : f32
+    %5 = spirv.FUnordGreaterThanEqual %2, %3 : vector<4xf32>
     ```
   }];
 }
@@ -375,14 +375,14 @@ def SPV_FUnordLessThanOp : SPV_LogicalBinaryOp<"FUnordLessThan", SPV_Float, []>
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    funordlt-op ::= ssa-id `=` `spv.FUnordLessThan` ssa-use, ssa-use
+    funordlt-op ::= ssa-id `=` `spirv.FUnordLessThan` ssa-use, ssa-use
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.FUnordLessThan %0, %1 : f32
-    %5 = spv.FUnordLessThan %2, %3 : vector<4xf32>
+    %4 = spirv.FUnordLessThan %0, %1 : f32
+    %5 = spirv.FUnordLessThan %2, %3 : vector<4xf32>
     ```
   }];
 }
@@ -409,14 +409,14 @@ def SPV_FUnordLessThanEqualOp : SPV_LogicalBinaryOp<"FUnordLessThanEqual", SPV_F
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    funordlte-op ::= ssa-id `=` `spv.FUnordLessThanEqual` ssa-use, ssa-use
+    funordlte-op ::= ssa-id `=` `spirv.FUnordLessThanEqual` ssa-use, ssa-use
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.FUnordLessThanEqual %0, %1 : f32
-    %5 = spv.FUnordLessThanEqual %2, %3 : vector<4xf32>
+    %4 = spirv.FUnordLessThanEqual %0, %1 : f32
+    %5 = spirv.FUnordLessThanEqual %2, %3 : vector<4xf32>
     ```
   }];
 }
@@ -440,14 +440,14 @@ def SPV_FUnordNotEqualOp : SPV_LogicalBinaryOp<"FUnordNotEqual", SPV_Float, [Com
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    funordneq-op ::= ssa-id `=` `spv.FUnordNotEqual` ssa-use, ssa-use
+    funordneq-op ::= ssa-id `=` `spirv.FUnordNotEqual` ssa-use, ssa-use
     ```
 
     #### Example:
 
     ```mlir
-    %4 = spv.FUnordNotEqual %0, %1 : f32
-    %5 = spv.FUnordNotEqual %2, %3 : vector<4xf32>
+    %4 = spirv.FUnordNotEqual %0, %1 : f32
+    %5 = spirv.FUnordNotEqual %2, %3 : vector<4xf32>
     ```
   }];
 }
@@ -472,14 +472,14 @@ def SPV_IEqualOp : SPV_LogicalBinaryOp<"IEqual",
     ```
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    iequal-op ::= ssa-id `=` `spv.IEqual` ssa-use, ssa-use
+    iequal-op ::= ssa-id `=` `spirv.IEqual` ssa-use, ssa-use
                              `:` integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %4 = spv.IEqual %0, %1 : i32
-    %5 = spv.IEqual %2, %3 : vector<4xi32>
+    %4 = spirv.IEqual %0, %1 : i32
+    %5 = spirv.IEqual %2, %3 : vector<4xi32>
 
     ```
   }];
@@ -505,14 +505,14 @@ def SPV_INotEqualOp : SPV_LogicalBinaryOp<"INotEqual",
     ```
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    inot-equal-op ::= ssa-id `=` `spv.INotEqual` ssa-use, ssa-use
+    inot-equal-op ::= ssa-id `=` `spirv.INotEqual` ssa-use, ssa-use
                                  `:` integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %4 = spv.INotEqual %0, %1 : i32
-    %5 = spv.INotEqual %2, %3 : vector<4xi32>
+    %4 = spirv.INotEqual %0, %1 : i32
+    %5 = spirv.INotEqual %2, %3 : vector<4xi32>
 
     ```
   }];
@@ -536,15 +536,15 @@ def SPV_IsInfOp : SPV_LogicalUnaryOp<"IsInf", SPV_Float, []> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    isinf-op ::= ssa-id `=` `spv.IsInf` ssa-use
+    isinf-op ::= ssa-id `=` `spirv.IsInf` ssa-use
                             `:` float-scalar-vector-type
     ```
 
     #### Example:
 
     ```mlir
-    %2 = spv.IsInf %0: f32
-    %3 = spv.IsInf %1: vector<4xi32>
+    %2 = spirv.IsInf %0: f32
+    %3 = spirv.IsInf %1: vector<4xf32>
     ```
   }];
 }
@@ -569,15 +569,15 @@ def SPV_IsNanOp : SPV_LogicalUnaryOp<"IsNan", SPV_Float, []> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    isnan-op ::= ssa-id `=` `spv.IsNan` ssa-use
+    isnan-op ::= ssa-id `=` `spirv.IsNan` ssa-use
                             `:` float-scalar-vector-type
     ```
 
     #### Example:
 
     ```mlir
-    %2 = spv.IsNan %0: f32
-    %3 = spv.IsNan %1: vector<4xi32>
+    %2 = spirv.IsNan %0: f32
+    %3 = spirv.IsNan %1: vector<4xf32>
     ```
   }];
 }
@@ -605,15 +605,15 @@ def SPV_LogicalAndOp : SPV_LogicalBinaryOp<"LogicalAnd",
     <!-- End of AutoGen section -->
 
     ```
-    logical-and ::= `spv.LogicalAnd` ssa-use `,` ssa-use
+    logical-and ::= `spirv.LogicalAnd` ssa-use `,` ssa-use
                     `:` operand-type
     ```
 
     #### Example:
 
     ```mlir
-    %2 = spv.LogicalAnd %0, %1 : i1
-    %2 = spv.LogicalAnd %0, %1 : vector<4xi1>
+    %2 = spirv.LogicalAnd %0, %1 : i1
+    %2 = spirv.LogicalAnd %0, %1 : vector<4xi1>
     ```
   }];
 
@@ -643,15 +643,15 @@ def SPV_LogicalEqualOp : SPV_LogicalBinaryOp<"LogicalEqual",
     <!-- End of AutoGen section -->
 
     ```
-    logical-equal ::= `spv.LogicalEqual` ssa-use `,` ssa-use
+    logical-equal ::= `spirv.LogicalEqual` ssa-use `,` ssa-use
                       `:` operand-type
     ```
 
     #### Example:
 
     ```mlir
-    %2 = spv.LogicalEqual %0, %1 : i1
-    %2 = spv.LogicalEqual %0, %1 : vector<4xi1>
+    %2 = spirv.LogicalEqual %0, %1 : i1
+    %2 = spirv.LogicalEqual %0, %1 : vector<4xi1>
     ```
   }];
 }
@@ -675,14 +675,14 @@ def SPV_LogicalNotOp : SPV_LogicalUnaryOp<"LogicalNot",
     <!-- End of AutoGen section -->
 
     ```
-    logical-not ::= `spv.LogicalNot` ssa-use `:` operand-type
+    logical-not ::= `spirv.LogicalNot` ssa-use `:` operand-type
     ```
 
     #### Example:
 
     ```mlir
-    %2 = spv.LogicalNot %0 : i1
-    %2 = spv.LogicalNot %0 : vector<4xi1>
+    %2 = spirv.LogicalNot %0 : i1
+    %2 = spirv.LogicalNot %0 : vector<4xi1>
     ```
   }];
 
@@ -712,15 +712,15 @@ def SPV_LogicalNotEqualOp : SPV_LogicalBinaryOp<"LogicalNotEqual",
     <!-- End of AutoGen section -->
 
     ```
-    logical-not-equal ::= `spv.LogicalNotEqual` ssa-use `,` ssa-use
+    logical-not-equal ::= `spirv.LogicalNotEqual` ssa-use `,` ssa-use
                           `:` operand-type
     ```
 
     #### Example:
 
     ```mlir
-    %2 = spv.LogicalNotEqual %0, %1 : i1
-    %2 = spv.LogicalNotEqual %0, %1 : vector<4xi1>
+    %2 = spirv.LogicalNotEqual %0, %1 : i1
+    %2 = spirv.LogicalNotEqual %0, %1 : vector<4xi1>
     ```
   }];
 }
@@ -748,15 +748,15 @@ def SPV_LogicalOrOp : SPV_LogicalBinaryOp<"LogicalOr",
     <!-- End of AutoGen section -->
 
     ```
-    logical-or ::= `spv.LogicalOr` ssa-use `,` ssa-use
+    logical-or ::= `spirv.LogicalOr` ssa-use `,` ssa-use
                     `:` operand-type
     ```
 
     #### Example:
 
     ```mlir
-    %2 = spv.LogicalOr %0, %1 : i1
-    %2 = spv.LogicalOr %0, %1 : vector<4xi1>
+    %2 = spirv.LogicalOr %0, %1 : i1
+    %2 = spirv.LogicalOr %0, %1 : vector<4xi1>
     ```
   }];
 
@@ -786,14 +786,14 @@ def SPV_OrderedOp : SPV_LogicalBinaryOp<"Ordered", SPV_Float, [Commutative]> {
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    ordered-op ::= ssa-id `=` `spv.Ordered` ssa-use, ssa-use
+    ordered-op ::= ssa-id `=` `spirv.Ordered` ssa-use, ssa-use
     ```mlir
 
     #### Example:
 
     ```
-    %4 = spv.Ordered %0, %1 : f32
-    %5 = spv.Ordered %2, %3 : vector<4xf32>
+    %4 = spirv.Ordered %0, %1 : f32
+    %5 = spirv.Ordered %2, %3 : vector<4xf32>
     ```
   }];
 
@@ -827,14 +827,14 @@ def SPV_SGreaterThanOp : SPV_LogicalBinaryOp<"SGreaterThan",
     ```
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    sgreater-than-op ::= ssa-id `=` `spv.SGreaterThan` ssa-use, ssa-use
+    sgreater-than-op ::= ssa-id `=` `spirv.SGreaterThan` ssa-use, ssa-use
                                     `:` integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %4 = spv.SGreaterThan %0, %1 : i32
-    %5 = spv.SGreaterThan %2, %3 : vector<4xi32>
+    %4 = spirv.SGreaterThan %0, %1 : i32
+    %5 = spirv.SGreaterThan %2, %3 : vector<4xi32>
 
     ```
   }];
@@ -864,14 +864,14 @@ def SPV_SGreaterThanEqualOp : SPV_LogicalBinaryOp<"SGreaterThanEqual",
     ```
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    sgreater-than-equal-op ::= ssa-id `=` `spv.SGreaterThanEqual` ssa-use, ssa-use
+    sgreater-than-equal-op ::= ssa-id `=` `spirv.SGreaterThanEqual` ssa-use, ssa-use
                                           `:` integer-scalar-vector-type
     ```
     #### Example:
 
     ```
-    %4 = spv.SGreaterThanEqual %0, %1 : i32
-    %5 = spv.SGreaterThanEqual %2, %3 : vector<4xi32>
+    %4 = spirv.SGreaterThanEqual %0, %1 : i32
+    %5 = spirv.SGreaterThanEqual %2, %3 : vector<4xi32>
 
     ```
   }];
@@ -899,14 +899,14 @@ def SPV_SLessThanOp : SPV_LogicalBinaryOp<"SLessThan",
     ```
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    sless-than-op ::= ssa-id `=` `spv.SLessThan` ssa-use, ssa-use
+    sless-than-op ::= ssa-id `=` `spirv.SLessThan` ssa-use, ssa-use
                                  `:` integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %4 = spv.SLessThan %0, %1 : i32
-    %5 = spv.SLessThan %2, %3 : vector<4xi32>
+    %4 = spirv.SLessThan %0, %1 : i32
+    %5 = spirv.SLessThan %2, %3 : vector<4xi32>
 
     ```
   }];
@@ -936,14 +936,14 @@ def SPV_SLessThanEqualOp : SPV_LogicalBinaryOp<"SLessThanEqual",
     ```
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    sless-than-equal-op ::= ssa-id `=` `spv.SLessThanEqual` ssa-use, ssa-use
+    sless-than-equal-op ::= ssa-id `=` `spirv.SLessThanEqual` ssa-use, ssa-use
                                        `:` integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %4 = spv.SLessThanEqual %0, %1 : i32
-    %5 = spv.SLessThanEqual %2, %3 : vector<4xi32>
+    %4 = spirv.SLessThanEqual %0, %1 : i32
+    %5 = spirv.SLessThanEqual %2, %3 : vector<4xi32>
 
     ```
   }];
@@ -985,16 +985,16 @@ def SPV_SelectOp : SPV_Op<"Select",
                            | pointer-type
     select-condition-type ::= boolean-type
                               | `vector<` integer-literal `x` boolean-type `>`
-    select-op ::= ssa-id `=` `spv.Select` ssa-use, ssa-use, ssa-use
+    select-op ::= ssa-id `=` `spirv.Select` ssa-use, ssa-use, ssa-use
                   `:` select-condition-type `,` select-object-type
     ```
 
     #### Example:
 
     ```mlir
-    %3 = spv.Select %0, %1, %2 : i1, f32
-    %3 = spv.Select %0, %1, %2 : i1, vector<3xi32>
-    %3 = spv.Select %0, %1, %2 : vector<3xi1>, vector<3xf32>
+    %3 = spirv.Select %0, %1, %2 : i1, f32
+    %3 = spirv.Select %0, %1, %2 : i1, vector<3xi32>
+    %3 = spirv.Select %0, %1, %2 : vector<3xi1>, vector<3xf32>
     ```
   }];
 
@@ -1036,14 +1036,14 @@ def SPV_UGreaterThanOp : SPV_LogicalBinaryOp<"UGreaterThan",
     ```
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    ugreater-than-op ::= ssa-id `=` `spv.UGreaterThan` ssa-use, ssa-use
+    ugreater-than-op ::= ssa-id `=` `spirv.UGreaterThan` ssa-use, ssa-use
                                     `:` integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %4 = spv.UGreaterThan %0, %1 : i32
-    %5 = spv.UGreaterThan %2, %3 : vector<4xi32>
+    %4 = spirv.UGreaterThan %0, %1 : i32
+    %5 = spirv.UGreaterThan %2, %3 : vector<4xi32>
 
     ```
   }];
@@ -1073,14 +1073,14 @@ def SPV_UGreaterThanEqualOp : SPV_LogicalBinaryOp<"UGreaterThanEqual",
     ```
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    ugreater-than-equal-op ::= ssa-id `=` `spv.UGreaterThanEqual` ssa-use, ssa-use
+    ugreater-than-equal-op ::= ssa-id `=` `spirv.UGreaterThanEqual` ssa-use, ssa-use
                                           `:` integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %4 = spv.UGreaterThanEqual %0, %1 : i32
-    %5 = spv.UGreaterThanEqual %2, %3 : vector<4xi32>
+    %4 = spirv.UGreaterThanEqual %0, %1 : i32
+    %5 = spirv.UGreaterThanEqual %2, %3 : vector<4xi32>
 
     ```
   }];
@@ -1108,14 +1108,14 @@ def SPV_ULessThanOp : SPV_LogicalBinaryOp<"ULessThan",
     ```
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    uless-than-op ::= ssa-id `=` `spv.ULessThan` ssa-use, ssa-use
+    uless-than-op ::= ssa-id `=` `spirv.ULessThan` ssa-use, ssa-use
                                  `:` integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %4 = spv.ULessThan %0, %1 : i32
-    %5 = spv.ULessThan %2, %3 : vector<4xi32>
+    %4 = spirv.ULessThan %0, %1 : i32
+    %5 = spirv.ULessThan %2, %3 : vector<4xi32>
 
     ```
   }];
@@ -1144,14 +1144,14 @@ def SPV_UnorderedOp : SPV_LogicalBinaryOp<"Unordered", SPV_Float, [Commutative]>
     ```
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    unordered-op ::= ssa-id `=` `spv.Unordered` ssa-use, ssa-use
+    unordered-op ::= ssa-id `=` `spirv.Unordered` ssa-use, ssa-use
     ```mlir
 
     #### Example:
 
     ```
-    %4 = spv.Unordered %0, %1 : f32
-    %5 = spv.Unordered %2, %3 : vector<4xf32>
+    %4 = spirv.Unordered %0, %1 : f32
+    %5 = spirv.Unordered %2, %3 : vector<4xf32>
     ```
   }];
 
@@ -1187,14 +1187,14 @@ def SPV_ULessThanEqualOp : SPV_LogicalBinaryOp<"ULessThanEqual",
     ```
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    uless-than-equal-op ::= ssa-id `=` `spv.ULessThanEqual` ssa-use, ssa-use
+    uless-than-equal-op ::= ssa-id `=` `spirv.ULessThanEqual` ssa-use, ssa-use
                                        `:` integer-scalar-vector-type
     ```
     #### Example:
 
     ```mlir
-    %4 = spv.ULessThanEqual %0, %1 : i32
-    %5 = spv.ULessThanEqual %2, %3 : vector<4xi32>
+    %4 = spirv.ULessThanEqual %0, %1 : i32
+    %5 = spirv.ULessThanEqual %2, %3 : vector<4xi32>
 
     ```
   }];

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMatrixOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMatrixOps.td
index d45259f7e389..9ae62eec28f6 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMatrixOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMatrixOps.td
@@ -34,16 +34,16 @@ def SPV_MatrixTimesMatrixOp : SPV_Op<"MatrixTimesMatrix", [NoSideEffect]> {
     <!-- End of AutoGen section -->
 
     ```
-    matrix-times-matrix-op ::= ssa-id `=` `spv.MatrixTimesMatrix` ssa-use,
+    matrix-times-matrix-op ::= ssa-id `=` `spirv.MatrixTimesMatrix` ssa-use,
     ssa-use `:` matrix-type `,` matrix-type `->` matrix-type
     ```mlir
 
     #### Example:
 
     ```
-    %0 = spv.MatrixTimesMatrix %matrix_1, %matrix_2 :
-        !spv.matrix<4 x vector<3xf32>>, !spv.matrix<3 x vector<4xf32>> ->
-        !spv.matrix<4 x vector<4xf32>>
+    %0 = spirv.MatrixTimesMatrix %matrix_1, %matrix_2 :
+        !spirv.matrix<4 x vector<3xf32>>, !spirv.matrix<3 x vector<4xf32>> ->
+        !spirv.matrix<4 x vector<4xf32>>
     ```
   }];
 
@@ -85,7 +85,7 @@ def SPV_MatrixTimesScalarOp : SPV_Op<"MatrixTimesScalar", [NoSideEffect]> {
     <!-- End of AutoGen section -->
 
     ```
-    matrix-times-scalar-op ::= ssa-id `=` `spv.MatrixTimesScalar` ssa-use,
+    matrix-times-scalar-op ::= ssa-id `=` `spirv.MatrixTimesScalar` ssa-use,
     ssa-use `:` matrix-type `,` float-type `->` matrix-type
 
     ```
@@ -94,8 +94,8 @@ def SPV_MatrixTimesScalarOp : SPV_Op<"MatrixTimesScalar", [NoSideEffect]> {
 
     ```mlir
 
-    %0 = spv.MatrixTimesScalar %matrix, %scalar :
-    !spv.matrix<3 x vector<3xf32>>, f32 -> !spv.matrix<3 x vector<3xf32>>
+    %0 = spirv.MatrixTimesScalar %matrix, %scalar :
+    !spirv.matrix<3 x vector<3xf32>>, f32 -> !spirv.matrix<3 x vector<3xf32>>
 
     ```
   }];
@@ -148,7 +148,7 @@ def SPV_TransposeOp : SPV_Op<"Transpose", [NoSideEffect]> {
     <!-- End of AutoGen section -->
 
     ```
-    transpose-op ::= ssa-id `=` `spv.Transpose` ssa-use `:` matrix-type `->`
+    transpose-op ::= ssa-id `=` `spirv.Transpose` ssa-use `:` matrix-type `->`
     matrix-type
 
     ```mlir
@@ -156,8 +156,8 @@ def SPV_TransposeOp : SPV_Op<"Transpose", [NoSideEffect]> {
     #### Example:
 
     ```
-    %0 = spv.Transpose %matrix: !spv.matrix<2 x vector<3xf32>> ->
-    !spv.matrix<3 x vector<2xf32>>
+    %0 = spirv.Transpose %matrix: !spirv.matrix<2 x vector<3xf32>> ->
+    !spirv.matrix<3 x vector<2xf32>>
 
     ```
   }];

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMemoryOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMemoryOps.td
index 4d22316debeb..66f185463558 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMemoryOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMemoryOps.td
@@ -47,7 +47,7 @@ def SPV_AccessChainOp : SPV_Op<"AccessChain", [NoSideEffect]> {
 
     <!-- End of AutoGen section -->
     ```
-    access-chain-op ::= ssa-id `=` `spv.AccessChain` ssa-use
+    access-chain-op ::= ssa-id `=` `spirv.AccessChain` ssa-use
                         `[` ssa-use (',' ssa-use)* `]`
                         `:` pointer-type
     ```
@@ -55,10 +55,10 @@ def SPV_AccessChainOp : SPV_Op<"AccessChain", [NoSideEffect]> {
     #### Example:
 
     ```mlir
-    %0 = "spv.Constant"() { value = 1: i32} : () -> i32
-    %1 = spv.Variable : !spv.ptr<!spv.struct<f32, !spv.array<4xf32>>, Function>
-    %2 = spv.AccessChain %1[%0] : !spv.ptr<!spv.struct<f32, !spv.array<4xf32>>, Function>
-    %3 = spv.Load "Function" %2 ["Volatile"] : !spv.array<4xf32>
+    %0 = "spirv.Constant"() { value = 1: i32} : () -> i32
+    %1 = spirv.Variable : !spirv.ptr<!spirv.struct<f32, !spirv.array<4xf32>>, Function>
+    %2 = spirv.AccessChain %1[%0] : !spirv.ptr<!spirv.struct<f32, !spirv.array<4xf32>>, Function>
+    %3 = spirv.Load "Function" %2 ["Volatile"] : !spirv.array<4xf32>
     ```
   }];
 
@@ -101,7 +101,7 @@ def SPV_CopyMemoryOp : SPV_Op<"CopyMemory", []> {
     <!-- End of AutoGen section -->
 
     ```
-    copy-memory-op ::= `spv.CopyMemory ` storage-class ssa-use
+    copy-memory-op ::= `spirv.CopyMemory ` storage-class ssa-use
                        storage-class ssa-use
                        (`[` memory-access `]` (`, [` memory-access `]`)?)?
                        ` : ` spirv-element-type
@@ -110,9 +110,9 @@ def SPV_CopyMemoryOp : SPV_Op<"CopyMemory", []> {
     #### Example:
 
     ```mlir
-    %0 = spv.Variable : !spv.ptr<f32, Function>
-    %1 = spv.Variable : !spv.ptr<f32, Function>
-    spv.CopyMemory "Function" %0, "Function" %1 : f32
+    %0 = spirv.Variable : !spirv.ptr<f32, Function>
+    %1 = spirv.Variable : !spirv.ptr<f32, Function>
+    spirv.CopyMemory "Function" %0, "Function" %1 : f32
     ```
   }];
 
@@ -144,7 +144,7 @@ def SPV_InBoundsPtrAccessChainOp : SPV_Op<"InBoundsPtrAccessChain", [NoSideEffec
     <!-- End of AutoGen section -->
 
     ```
-    access-chain-op ::= ssa-id `=` `spv.InBoundsPtrAccessChain` ssa-use
+    access-chain-op ::= ssa-id `=` `spirv.InBoundsPtrAccessChain` ssa-use
                         `[` ssa-use (',' ssa-use)* `]`
                         `:` pointer-type
     ```mlir
@@ -152,8 +152,8 @@ def SPV_InBoundsPtrAccessChainOp : SPV_Op<"InBoundsPtrAccessChain", [NoSideEffec
     #### Example:
 
     ```
-    func @inbounds_ptr_access_chain(%arg0: !spv.ptr<f32, CrossWorkgroup>, %arg1 : i64) -> () {
-      %0 = spv.InBoundsPtrAccessChain %arg0[%arg1] : !spv.ptr<f32, CrossWorkgroup>, i64
+    func @inbounds_ptr_access_chain(%arg0: !spirv.ptr<f32, CrossWorkgroup>, %arg1 : i64) -> () {
+      %0 = spirv.InBoundsPtrAccessChain %arg0[%arg1] : !spirv.ptr<f32, CrossWorkgroup>, i64
       ...
     }
     ```
@@ -202,17 +202,17 @@ def SPV_LoadOp : SPV_Op<"Load", []> {
     memory-access ::= `"None"` | `"Volatile"` | `"Aligned", ` integer-literal
                     | `"NonTemporal"`
 
-    load-op ::= ssa-id ` = spv.Load ` storage-class ssa-use
+    load-op ::= ssa-id ` = spirv.Load ` storage-class ssa-use
                 (`[` memory-access `]`)? ` : ` spirv-element-type
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.Variable : !spv.ptr<f32, Function>
-    %1 = spv.Load "Function" %0 : f32
-    %2 = spv.Load "Function" %0 ["Volatile"] : f32
-    %3 = spv.Load "Function" %0 ["Aligned", 4] : f32
+    %0 = spirv.Variable : !spirv.ptr<f32, Function>
+    %1 = spirv.Load "Function" %0 : f32
+    %2 = spirv.Load "Function" %0 ["Volatile"] : f32
+    %3 = spirv.Load "Function" %0 ["Aligned", 4] : f32
     ```
   }];
 
@@ -270,7 +270,7 @@ def SPV_PtrAccessChainOp : SPV_Op<"PtrAccessChain", [NoSideEffect]> {
     <!-- End of AutoGen section -->
 
     ```
-    [access-chain-op ::= ssa-id `=` `spv.PtrAccessChain` ssa-use
+    [access-chain-op ::= ssa-id `=` `spirv.PtrAccessChain` ssa-use
                         `[` ssa-use (',' ssa-use)* `]`
                         `:` pointer-type
     ```mlir
@@ -278,8 +278,8 @@ def SPV_PtrAccessChainOp : SPV_Op<"PtrAccessChain", [NoSideEffect]> {
     #### Example:
 
     ```
-    func @ptr_access_chain(%arg0: !spv.ptr<f32, CrossWorkgroup>, %arg1 : i64) -> () {
-      %0 = spv.PtrAccessChain %arg0[%arg1] : !spv.ptr<f32, CrossWorkgroup>, i64
+    func @ptr_access_chain(%arg0: !spirv.ptr<f32, CrossWorkgroup>, %arg1 : i64) -> () {
+      %0 = spirv.PtrAccessChain %arg0[%arg1] : !spirv.ptr<f32, CrossWorkgroup>, i64
       ...
     }
     ```
@@ -323,18 +323,18 @@ def SPV_StoreOp : SPV_Op<"Store", []> {
     <!-- End of AutoGen section -->
 
     ```
-    store-op ::= `spv.Store ` storage-class ssa-use `, ` ssa-use `, `
+    store-op ::= `spirv.Store ` storage-class ssa-use `, ` ssa-use `, `
                   (`[` memory-access `]`)? `:` spirv-element-type
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.Variable : !spv.ptr<f32, Function>
-    %1 = spv.FMul ... : f32
-    spv.Store "Function" %0, %1 : f32
-    spv.Store "Function" %0, %1 ["Volatile"] : f32
-    spv.Store "Function" %0, %1 ["Aligned", 4] : f32
+    %0 = spirv.Variable : !spirv.ptr<f32, Function>
+    %1 = spirv.FMul ... : f32
+    spirv.Store "Function" %0, %1 : f32
+    spirv.Store "Function" %0, %1 ["Volatile"] : f32
+    spirv.Store "Function" %0, %1 ["Aligned", 4] : f32
     ```
   }];
 
@@ -383,7 +383,7 @@ def SPV_VariableOp : SPV_Op<"Variable", []> {
     <!-- End of AutoGen section -->
 
     ```
-    variable-op ::= ssa-id `=` `spv.Variable` (`init(` ssa-use `)`)?
+    variable-op ::= ssa-id `=` `spirv.Variable` (`init(` ssa-use `)`)?
                     attribute-dict? `:` spirv-pointer-type
     ```
 
@@ -392,10 +392,10 @@ def SPV_VariableOp : SPV_Op<"Variable", []> {
     #### Example:
 
     ```mlir
-    %0 = spv.Constant ...
+    %0 = spirv.Constant ...
 
-    %1 = spv.Variable : !spv.ptr<f32, Function>
-    %2 = spv.Variable init(%0): !spv.ptr<f32, Function>
+    %1 = spirv.Variable : !spirv.ptr<f32, Function>
+    %2 = spirv.Variable init(%0): !spirv.ptr<f32, Function>
     ```
   }];
 

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMiscOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMiscOps.td
index 7ef33c9a09e8..78a3ebdaf87d 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMiscOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVMiscOps.td
@@ -27,13 +27,13 @@ def SPV_KHRAssumeTrueOp : SPV_KhrVendorOp<"AssumeTrue", []> {
     <!-- End of AutoGen section -->
 
     ```
-    assumetruekhr-op ::= `spv.KHR.AssumeTrue` ssa-use
+    assumetruekhr-op ::= `spirv.KHR.AssumeTrue` ssa-use
     ```mlir
 
     #### Example:
 
     ```
-    spv.KHR.AssumeTrue %arg
+    spirv.KHR.AssumeTrue %arg
     ```
   }];
 
@@ -69,14 +69,14 @@ def SPV_UndefOp : SPV_Op<"Undef", [NoSideEffect]> {
     <!-- End of AutoGen section -->
 
     ```
-    undef-op ::= `spv.Undef` `:` spirv-type
+    undef-op ::= `spirv.Undef` `:` spirv-type
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.Undef : f32
-    %1 = spv.Undef : !spv.struct<!spv.array<4 x vector<4xi32>>>
+    %0 = spirv.Undef : f32
+    %1 = spirv.Undef : !spirv.struct<!spirv.array<4 x vector<4xi32>>>
     ```
   }];
 

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td
index 33eb83a1e745..3afc55f341b3 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVNonUniformOps.td
@@ -57,14 +57,14 @@ def SPV_GroupNonUniformBallotOp : SPV_Op<"GroupNonUniformBallot", []> {
 
     ```
     scope ::= `"Workgroup"` | `"Subgroup"`
-    non-uniform-ballot-op ::= ssa-id `=` `spv.GroupNonUniformBallot` scope
+    non-uniform-ballot-op ::= ssa-id `=` `spirv.GroupNonUniformBallot` scope
                               ssa-use `:` `vector` `<` 4 `x` `integer-type` `>`
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.GroupNonUniformBallot "SubGroup" %predicate : vector<4xi32>
+    %0 = spirv.GroupNonUniformBallot "SubGroup" %predicate : vector<4xi32>
     ```
   }];
 
@@ -122,7 +122,7 @@ def SPV_GroupNonUniformBroadcastOp : SPV_Op<"GroupNonUniformBroadcast",
                                `vector<` integer-literal `x` integer-type `>` |
                                `vector<` integer-literal `x` float-type `>`
     group-non-uniform-broadcast-op ::= ssa-id `=`
-        `spv.GroupNonUniformBroadcast` scope ssa_use, ssa_use
+        `spirv.GroupNonUniformBroadcast` scope ssa_use, ssa_use
         `:` integer-float-scalar-vector-type `,` integer-type
     ```mlir
 
@@ -132,8 +132,8 @@ def SPV_GroupNonUniformBroadcastOp : SPV_Op<"GroupNonUniformBroadcast",
     %scalar_value = ... : f32
     %vector_value = ... : vector<4xf32>
     %id = ... : i32
-    %0 = spv.GroupNonUniformBroadcast "Subgroup" %scalar_value, %id : f32, i32
-    %1 = spv.GroupNonUniformBroadcast "Workgroup" %vector_value, %id :
+    %0 = spirv.GroupNonUniformBroadcast "Subgroup" %scalar_value, %id : f32, i32
+    %1 = spirv.GroupNonUniformBroadcast "Workgroup" %vector_value, %id :
       vector<4xf32>, i32
     ```
   }];
@@ -177,14 +177,14 @@ def SPV_GroupNonUniformElectOp : SPV_Op<"GroupNonUniformElect", []> {
 
     ```
     scope ::= `"Workgroup"` | `"Subgroup"`
-    non-uniform-elect-op ::= ssa-id `=` `spv.GroupNonUniformElect` scope
+    non-uniform-elect-op ::= ssa-id `=` `spirv.GroupNonUniformElect` scope
                              `:` `i1`
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.GroupNonUniformElect : i1
+    %0 = spirv.GroupNonUniformElect : i1
     ```
   }];
 
@@ -239,7 +239,7 @@ def SPV_GroupNonUniformFAddOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    non-uniform-fadd-op ::= ssa-id `=` `spv.GroupNonUniformFAdd` scope operation
+    non-uniform-fadd-op ::= ssa-id `=` `spirv.GroupNonUniformFAdd` scope operation
                             ssa-use ( `cluster_size` `(` ssa_use `)` )?
                             `:` float-scalar-vector-type
     ```
@@ -247,11 +247,11 @@ def SPV_GroupNonUniformFAddOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     #### Example:
 
     ```mlir
-    %four = spv.Constant 4 : i32
+    %four = spirv.Constant 4 : i32
     %scalar = ... : f32
     %vector = ... : vector<4xf32>
-    %0 = spv.GroupNonUniformFAdd "Workgroup" "Reduce" %scalar : f32
-    %1 = spv.GroupNonUniformFAdd "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
+    %0 = spirv.GroupNonUniformFAdd "Workgroup" "Reduce" %scalar : f32
+    %1 = spirv.GroupNonUniformFAdd "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
     ```
   }];
 
@@ -299,7 +299,7 @@ def SPV_GroupNonUniformFMaxOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    non-uniform-fmax-op ::= ssa-id `=` `spv.GroupNonUniformFMax` scope operation
+    non-uniform-fmax-op ::= ssa-id `=` `spirv.GroupNonUniformFMax` scope operation
                             ssa-use ( `cluster_size` `(` ssa_use `)` )?
                             `:` float-scalar-vector-type
     ```
@@ -307,11 +307,11 @@ def SPV_GroupNonUniformFMaxOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     #### Example:
 
     ```mlir
-    %four = spv.Constant 4 : i32
+    %four = spirv.Constant 4 : i32
     %scalar = ... : f32
     %vector = ... : vector<4xf32>
-    %0 = spv.GroupNonUniformFMax "Workgroup" "Reduce" %scalar : f32
-    %1 = spv.GroupNonUniformFMax "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
+    %0 = spirv.GroupNonUniformFMax "Workgroup" "Reduce" %scalar : f32
+    %1 = spirv.GroupNonUniformFMax "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
     ```
   }];
 
@@ -359,7 +359,7 @@ def SPV_GroupNonUniformFMinOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    non-uniform-fmin-op ::= ssa-id `=` `spv.GroupNonUniformFMin` scope operation
+    non-uniform-fmin-op ::= ssa-id `=` `spirv.GroupNonUniformFMin` scope operation
                             ssa-use ( `cluster_size` `(` ssa_use `)` )?
                             `:` float-scalar-vector-type
     ```
@@ -367,11 +367,11 @@ def SPV_GroupNonUniformFMinOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     #### Example:
 
     ```mlir
-    %four = spv.Constant 4 : i32
+    %four = spirv.Constant 4 : i32
     %scalar = ... : f32
     %vector = ... : vector<4xf32>
-    %0 = spv.GroupNonUniformFMin "Workgroup" "Reduce" %scalar : f32
-    %1 = spv.GroupNonUniformFMin "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
+    %0 = spirv.GroupNonUniformFMin "Workgroup" "Reduce" %scalar : f32
+    %1 = spirv.GroupNonUniformFMin "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
     ```
   }];
 
@@ -416,7 +416,7 @@ def SPV_GroupNonUniformFMulOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
     float-scalar-vector-type ::= float-type |
                                  `vector<` integer-literal `x` float-type `>`
-    non-uniform-fmul-op ::= ssa-id `=` `spv.GroupNonUniformFMul` scope operation
+    non-uniform-fmul-op ::= ssa-id `=` `spirv.GroupNonUniformFMul` scope operation
                             ssa-use ( `cluster_size` `(` ssa_use `)` )?
                             `:` float-scalar-vector-type
     ```
@@ -424,11 +424,11 @@ def SPV_GroupNonUniformFMulOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     #### Example:
 
     ```mlir
-    %four = spv.Constant 4 : i32
+    %four = spirv.Constant 4 : i32
     %scalar = ... : f32
     %vector = ... : vector<4xf32>
-    %0 = spv.GroupNonUniformFMul "Workgroup" "Reduce" %scalar : f32
-    %1 = spv.GroupNonUniformFMul "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
+    %0 = spirv.GroupNonUniformFMul "Workgroup" "Reduce" %scalar : f32
+    %1 = spirv.GroupNonUniformFMul "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xf32>
     ```
   }];
 
@@ -471,7 +471,7 @@ def SPV_GroupNonUniformIAddOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    non-uniform-iadd-op ::= ssa-id `=` `spv.GroupNonUniformIAdd` scope operation
+    non-uniform-iadd-op ::= ssa-id `=` `spirv.GroupNonUniformIAdd` scope operation
                             ssa-use ( `cluster_size` `(` ssa_use `)` )?
                             `:` integer-scalar-vector-type
     ```
@@ -479,11 +479,11 @@ def SPV_GroupNonUniformIAddOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     #### Example:
 
     ```mlir
-    %four = spv.Constant 4 : i32
+    %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spv.GroupNonUniformIAdd "Workgroup" "Reduce" %scalar : i32
-    %1 = spv.GroupNonUniformIAdd "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformIAdd "Workgroup" "Reduce" %scalar : i32
+    %1 = spirv.GroupNonUniformIAdd "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
     ```
   }];
 
@@ -526,7 +526,7 @@ def SPV_GroupNonUniformIMulOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    non-uniform-imul-op ::= ssa-id `=` `spv.GroupNonUniformIMul` scope operation
+    non-uniform-imul-op ::= ssa-id `=` `spirv.GroupNonUniformIMul` scope operation
                             ssa-use ( `cluster_size` `(` ssa_use `)` )?
                             `:` integer-scalar-vector-type
     ```
@@ -534,11 +534,11 @@ def SPV_GroupNonUniformIMulOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     #### Example:
 
     ```mlir
-    %four = spv.Constant 4 : i32
+    %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spv.GroupNonUniformIMul "Workgroup" "Reduce" %scalar : i32
-    %1 = spv.GroupNonUniformIMul "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformIMul "Workgroup" "Reduce" %scalar : i32
+    %1 = spirv.GroupNonUniformIMul "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
     ```
   }];
 
@@ -583,7 +583,7 @@ def SPV_GroupNonUniformSMaxOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    non-uniform-smax-op ::= ssa-id `=` `spv.GroupNonUniformSMax` scope operation
+    non-uniform-smax-op ::= ssa-id `=` `spirv.GroupNonUniformSMax` scope operation
                             ssa-use ( `cluster_size` `(` ssa_use `)` )?
                             `:` integer-scalar-vector-type
     ```
@@ -591,11 +591,11 @@ def SPV_GroupNonUniformSMaxOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     #### Example:
 
     ```mlir
-    %four = spv.Constant 4 : i32
+    %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spv.GroupNonUniformSMax "Workgroup" "Reduce" %scalar : i32
-    %1 = spv.GroupNonUniformSMax "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformSMax "Workgroup" "Reduce" %scalar : i32
+    %1 = spirv.GroupNonUniformSMax "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
     ```
   }];
 
@@ -640,7 +640,7 @@ def SPV_GroupNonUniformSMinOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    non-uniform-smin-op ::= ssa-id `=` `spv.GroupNonUniformSMin` scope operation
+    non-uniform-smin-op ::= ssa-id `=` `spirv.GroupNonUniformSMin` scope operation
                             ssa-use ( `cluster_size` `(` ssa_use `)` )?
                             `:` integer-scalar-vector-type
     ```
@@ -648,11 +648,11 @@ def SPV_GroupNonUniformSMinOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     #### Example:
 
     ```mlir
-    %four = spv.Constant 4 : i32
+    %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spv.GroupNonUniformSMin "Workgroup" "Reduce" %scalar : i32
-    %1 = spv.GroupNonUniformSMin "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformSMin "Workgroup" "Reduce" %scalar : i32
+    %1 = spirv.GroupNonUniformSMin "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
     ```
   }];
 
@@ -690,7 +690,7 @@ def SPV_GroupNonUniformShuffleOp : SPV_Op<"GroupNonUniformShuffle",
     #### Example:
 
     ```mlir
-    %0 = spv.GroupNonUniformShuffle <Subgroup> %val, %id : f32, i32
+    %0 = spirv.GroupNonUniformShuffle <Subgroup> %val, %id : f32, i32
     ```
   }];
 
@@ -745,7 +745,7 @@ def SPV_GroupNonUniformShuffleDownOp : SPV_Op<"GroupNonUniformShuffleDown",
     #### Example:
 
     ```mlir
-    %0 = spv.GroupNonUniformShuffleDown <Subgroup> %val, %delta : f32, i32
+    %0 = spirv.GroupNonUniformShuffleDown <Subgroup> %val, %delta : f32, i32
     ```
   }];
 
@@ -799,7 +799,7 @@ def SPV_GroupNonUniformShuffleUpOp : SPV_Op<"GroupNonUniformShuffleUp",
     #### Example:
 
     ```mlir
-    %0 = spv.GroupNonUniformShuffleUp <Subgroup> %val, %delta : f32, i32
+    %0 = spirv.GroupNonUniformShuffleUp <Subgroup> %val, %delta : f32, i32
     ```
   }];
 
@@ -853,7 +853,7 @@ def SPV_GroupNonUniformShuffleXorOp : SPV_Op<"GroupNonUniformShuffleXor",
     #### Example:
 
     ```mlir
-    %0 = spv.GroupNonUniformShuffleXor <Subgroup> %val, %mask : f32, i32
+    %0 = spirv.GroupNonUniformShuffleXor <Subgroup> %val, %mask : f32, i32
     ```
   }];
 
@@ -913,7 +913,7 @@ def SPV_GroupNonUniformUMaxOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    non-uniform-umax-op ::= ssa-id `=` `spv.GroupNonUniformUMax` scope operation
+    non-uniform-umax-op ::= ssa-id `=` `spirv.GroupNonUniformUMax` scope operation
                             ssa-use ( `cluster_size` `(` ssa_use `)` )?
                             `:` integer-scalar-vector-type
     ```
@@ -921,11 +921,11 @@ def SPV_GroupNonUniformUMaxOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     #### Example:
 
     ```mlir
-    %four = spv.Constant 4 : i32
+    %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spv.GroupNonUniformUMax "Workgroup" "Reduce" %scalar : i32
-    %1 = spv.GroupNonUniformUMax "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformUMax "Workgroup" "Reduce" %scalar : i32
+    %1 = spirv.GroupNonUniformUMax "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
     ```
   }];
 
@@ -971,7 +971,7 @@ def SPV_GroupNonUniformUMinOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     operation ::= `"Reduce"` | `"InclusiveScan"` | `"ExclusiveScan"` | ...
     integer-scalar-vector-type ::= integer-type |
                                  `vector<` integer-literal `x` integer-type `>`
-    non-uniform-umin-op ::= ssa-id `=` `spv.GroupNonUniformUMin` scope operation
+    non-uniform-umin-op ::= ssa-id `=` `spirv.GroupNonUniformUMin` scope operation
                             ssa-use ( `cluster_size` `(` ssa_use `)` )?
                             `:` integer-scalar-vector-type
     ```
@@ -979,11 +979,11 @@ def SPV_GroupNonUniformUMinOp : SPV_GroupNonUniformArithmeticOp<"GroupNonUniform
     #### Example:
 
     ```mlir
-    %four = spv.Constant 4 : i32
+    %four = spirv.Constant 4 : i32
     %scalar = ... : i32
     %vector = ... : vector<4xi32>
-    %0 = spv.GroupNonUniformUMin "Workgroup" "Reduce" %scalar : i32
-    %1 = spv.GroupNonUniformUMin "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
+    %0 = spirv.GroupNonUniformUMin "Workgroup" "Reduce" %scalar : i32
+    %1 = spirv.GroupNonUniformUMin "Subgroup" "ClusteredReduce" %vector cluster_size(%four) : vector<4xi32>
     ```
   }];
 

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
index 6688f059119d..0fab4308892f 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVStructureOps.td
@@ -42,14 +42,14 @@ def SPV_AddressOfOp : SPV_Op<"mlir.addressof",
     <!-- End of AutoGen section -->
 
     ```
-    spv-address-of-op ::= ssa-id `=` `spv.mlir.addressof` symbol-ref-id
+    spv-address-of-op ::= ssa-id `=` `spirv.mlir.addressof` symbol-ref-id
                                      `:` spirv-pointer-type
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.mlir.addressof @global_var : !spv.ptr<f32, Input>
+    %0 = spirv.mlir.addressof @global_var : !spirv.ptr<f32, Input>
     ```
   }];
 
@@ -91,7 +91,7 @@ def SPV_ConstantOp : SPV_Op<"Constant",
     * ...
 
     Having such a plethora of constant instructions renders IR transformations
-    more tedious. Therefore, we use a single `spv.Constant` op to represent
+    more tedious. Therefore, we use a single `spirv.Constant` op to represent
     them all. Note that conversion between those SPIR-V constant instructions
     and this op is purely mechanical; so it can be scoped to the binary
     (de)serialization process.
@@ -99,16 +99,16 @@ def SPV_ConstantOp : SPV_Op<"Constant",
     <!-- End of AutoGen section -->
 
     ```
-    spv.Constant-op ::= ssa-id `=` `spv.Constant` attribute-value
+    spirv.Constant-op ::= ssa-id `=` `spirv.Constant` attribute-value
                         (`:` spirv-type)?
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.Constant true
-    %1 = spv.Constant dense<[2, 3]> : vector<2xf32>
-    %2 = spv.Constant [dense<3.0> : vector<2xf32>] : !spv.array<1xvector<2xf32>>
+    %0 = spirv.Constant true
+    %1 = spirv.Constant dense<[2, 3]> : vector<2xf32>
+    %2 = spirv.Constant [dense<3.0> : vector<2xf32>] : !spirv.array<1xvector<2xf32>>
     ```
 
     TODO: support constant structs
@@ -158,11 +158,11 @@ def SPV_EntryPointOp : SPV_Op<"EntryPoint", [InModuleScope]> {
     OpEntryPoint instructions with the same Execution Model and the same
     Name string.
 
-    Interface is a list of symbol references to `spv.GlobalVariable`
+    Interface is a list of symbol references to `spirv.GlobalVariable`
     operations. These declare the set of global variables from a
     module that form the interface of this entry point. The set of
     Interface symbols must be equal to or a superset of the
-    `spv.GlobalVariable`s referenced by the entry point’s static call
+    `spirv.GlobalVariable`s referenced by the entry point’s static call
     tree, within the interface’s storage classes.  Before version 1.4,
     the interface’s storage classes are limited to the Input and
     Output storage classes. Starting with version 1.4, the interface’s
@@ -175,15 +175,15 @@ def SPV_EntryPointOp : SPV_Op<"EntryPoint", [InModuleScope]> {
     execution-model ::= "Vertex" | "TesellationControl" |
                         <and other SPIR-V execution models...>
 
-    entry-point-op ::= ssa-id `=` `spv.EntryPoint` execution-model
+    entry-point-op ::= ssa-id `=` `spirv.EntryPoint` execution-model
                        symbol-reference (`, ` symbol-reference)*
     ```
 
     #### Example:
 
     ```mlir
-    spv.EntryPoint "GLCompute" @foo
-    spv.EntryPoint "Kernel" @foo, @var1, @var2
+    spirv.EntryPoint "GLCompute" @foo
+    spirv.EntryPoint "Kernel" @foo, @var1, @var2
 
     ```
   }];
@@ -224,15 +224,15 @@ def SPV_ExecutionModeOp : SPV_Op<"ExecutionMode", [InModuleScope]> {
     execution-mode ::= "Invocations" | "SpacingEqual" |
                        <and other SPIR-V execution modes...>
 
-    execution-mode-op ::= `spv.ExecutionMode ` ssa-use execution-mode
+    execution-mode-op ::= `spirv.ExecutionMode ` ssa-use execution-mode
                           (integer-literal (`, ` integer-literal)* )?
     ```
 
     #### Example:
 
     ```mlir
-    spv.ExecutionMode @foo "ContractionOff"
-    spv.ExecutionMode @bar "LocalSizeHint", 3, 4, 5
+    spirv.ExecutionMode @foo "ContractionOff"
+    spirv.ExecutionMode @bar "LocalSizeHint", 3, 4, 5
     ```
   }];
 
@@ -277,15 +277,15 @@ def SPV_FuncOp : SPV_Op<"func", [
 
     ```
     spv-function-control ::= "None" | "Inline" | "DontInline" | ...
-    spv-function-op ::= `spv.func` function-signature
+    spv-function-op ::= `spirv.func` function-signature
                          spv-function-control region
     ```
 
     #### Example:
 
     ```mlir
-    spv.func @foo() -> () "None" { ... }
-    spv.func @bar() -> () "Inline|Pure" { ... }
+    spirv.func @foo() -> () "None" { ... }
+    spirv.func @bar() -> () "Inline|Pure" { ... }
     ```
   }];
 
@@ -349,13 +349,13 @@ def SPV_GlobalVariableOp : SPV_Op<"GlobalVariable", [InModuleScope, Symbol]> {
     Initializer is optional.  If Initializer is present, it will be
     the initial value of the variable’s memory content. Initializer
     must be an symbol defined from a constant instruction or other
-    `spv.GlobalVariable` operation in module scope. Initializer must
+    `spirv.GlobalVariable` operation in module scope. Initializer must
     have the same type as the type of the defined symbol.
 
     <!-- End of AutoGen section -->
 
     ```
-    variable-op ::= `spv.GlobalVariable` spirv-type symbol-ref-id
+    variable-op ::= `spirv.GlobalVariable` spirv-type symbol-ref-id
                     (`initializer(` symbol-ref-id `)`)?
                     (`bind(` integer-literal, integer-literal `)`)?
                     (`built_in(` string-literal `)`)?
@@ -369,10 +369,10 @@ def SPV_GlobalVariableOp : SPV_Op<"GlobalVariable", [InModuleScope, Symbol]> {
     #### Example:
 
     ```mlir
-    spv.GlobalVariable @var0 : !spv.ptr<f32, Input> @var0
-    spv.GlobalVariable @var1 initializer(@var0) : !spv.ptr<f32, Output>
-    spv.GlobalVariable @var2 bind(1, 2) : !spv.ptr<f32, Uniform>
-    spv.GlobalVariable @var3 built_in("GlobalInvocationId") : !spv.ptr<vector<3xi32>, Input>
+    spirv.GlobalVariable @var0 : !spirv.ptr<f32, Input> @var0
+    spirv.GlobalVariable @var1 initializer(@var0) : !spirv.ptr<f32, Output>
+    spirv.GlobalVariable @var2 bind(1, 2) : !spirv.ptr<f32, Uniform>
+    spirv.GlobalVariable @var3 built_in("GlobalInvocationId") : !spirv.ptr<vector<3xi32>, Input>
     ```
   }];
 
@@ -457,7 +457,7 @@ def SPV_ModuleOp : SPV_Op<"module",
     ```
     addressing-model ::= `Logical` | `Physical32` | `Physical64` | ...
     memory-model ::= `Simple` | `GLSL450` | `OpenCL` | `Vulkan` | ...
-    spv-module-op ::= `spv.module` addressing-model memory-model
+    spv-module-op ::= `spirv.module` addressing-model memory-model
                       (requires  spirv-vce-attribute)?
                       (`attributes` attribute-dict)?
                       region
@@ -466,13 +466,13 @@ def SPV_ModuleOp : SPV_Op<"module",
     #### Example:
 
     ```mlir
-    spv.module Logical GLSL450  {}
+    spirv.module Logical GLSL450  {}
 
-    spv.module Logical Vulkan
-        requires #spv.vce<v1.0, [Shader], [SPV_KHR_vulkan_memory_model]>
+    spirv.module Logical Vulkan
+        requires #spirv.vce<v1.0, [Shader], [SPV_KHR_vulkan_memory_model]>
         attributes { some_additional_attr = ... } {
-      spv.func @do_nothing() -> () {
-        spv.Return
+      spirv.func @do_nothing() -> () {
+        spirv.Return
       }
     }
     ```
@@ -534,14 +534,14 @@ def SPV_ReferenceOfOp : SPV_Op<"mlir.referenceof", [NoSideEffect]> {
     <!-- End of AutoGen section -->
 
     ```
-    spv-reference-of-op ::= ssa-id `=` `spv.mlir.referenceof` symbol-ref-id
+    spv-reference-of-op ::= ssa-id `=` `spirv.mlir.referenceof` symbol-ref-id
                                        `:` spirv-scalar-type
     ```
 
     #### Example:
 
     ```mlir
-    %0 = spv.mlir.referenceof @spec_const : f32
+    %0 = spirv.mlir.referenceof @spec_const : f32
     ```
 
     TODO Add support for composite specialization constants.
@@ -577,14 +577,14 @@ def SPV_SpecConstantOp : SPV_Op<"SpecConstant", [InModuleScope, Symbol]> {
     * `OpSpecConstantTrue` and `OpSpecConstantFalse` for boolean constants
     * `OpSpecConstant` for scalar constants
 
-    Similar as `spv.Constant`, this op represents all of the above cases.
+    Similar as `spirv.Constant`, this op represents all of the above cases.
     `OpSpecConstantComposite` and `OpSpecConstantOp` are modelled with
     separate ops.
 
     <!-- End of AutoGen section -->
 
     ```
-    spv-spec-constant-op ::= `spv.SpecConstant` symbol-ref-id
+    spv-spec-constant-op ::= `spirv.SpecConstant` symbol-ref-id
                              `spec_id(` integer `)`
                              `=` attribute-value (`:` spirv-type)?
     ```
@@ -595,8 +595,8 @@ def SPV_SpecConstantOp : SPV_Op<"SpecConstant", [InModuleScope, Symbol]> {
     #### Example:
 
     ```mlir
-    spv.SpecConstant @spec_const1 = true
-    spv.SpecConstant @spec_const2 spec_id(5) = 42 : i32
+    spirv.SpecConstant @spec_const1 = true
+    spirv.SpecConstant @spec_const2 spec_id(5) = 42 : i32
     ```
   }];
 
@@ -621,30 +621,30 @@ def SPV_SpecConstantCompositeOp : SPV_Op<"SpecConstantComposite", [
   let description = [{
     This op declares a SPIR-V composite specialization constant. This covers
     the `OpSpecConstantComposite` SPIR-V instruction. Scalar constants are
-    covered by `spv.SpecConstant`.
+    covered by `spirv.SpecConstant`.
 
     A constituent of a spec constant composite can be:
     - A symbol referring of another spec constant.
     - The SSA ID of a non-specialization constant (i.e. defined through
-      `spv.SpecConstant`).
-    - The SSA ID of a `spv.Undef`.
+      `spirv.SpecConstant`).
+    - The SSA ID of a `spirv.Undef`.
 
     ```
-    spv-spec-constant-composite-op ::= `spv.SpecConstantComposite` symbol-ref-id ` (`
+    spv-spec-constant-composite-op ::= `spirv.SpecConstantComposite` symbol-ref-id ` (`
                                        symbol-ref-id (`, ` symbol-ref-id)*
                                        `) :` composite-type
     ```
 
      where `composite-type` is some non-scalar type that can be represented in the `spv`
-     dialect: `spv.struct`, `spv.array`, or `vector`.
+     dialect: `spirv.struct`, `spirv.array`, or `vector`.
 
      #### Example:
 
      ```mlir
-     spv.SpecConstant @sc1 = 1   : i32
-     spv.SpecConstant @sc2 = 2.5 : f32
-     spv.SpecConstant @sc3 = 3.5 : f32
-     spv.SpecConstantComposite @scc (@sc1, @sc2, @sc3) : !spv.struct<i32, f32, f32>
+     spirv.SpecConstant @sc1 = 1   : i32
+     spirv.SpecConstant @sc2 = 2.5 : f32
+     spirv.SpecConstant @sc3 = 3.5 : f32
+     spirv.SpecConstantComposite @scc (@sc1, @sc2, @sc3) : !spirv.struct<i32, f32, f32>
      ```
 
     TODO Add support for constituents that are:
@@ -682,15 +682,15 @@ def SPV_SpecConstantOperationOp : SPV_Op<"SpecConstantOperation", [
     In the `spv` dialect, this op is modelled as follows:
 
     ```
-    spv-spec-constant-operation-op ::= `spv.SpecConstantOperation` `wraps`
+    spv-spec-constant-operation-op ::= `spirv.SpecConstantOperation` `wraps`
                                          generic-spirv-op `:` function-type
     ```
 
-    In particular, an `spv.SpecConstantOperation` contains exactly one
+    In particular, an `spirv.SpecConstantOperation` contains exactly one
     region. In turn, that region, contains exactly 2 instructions:
     - One of SPIR-V's instructions that are allowed within an
     OpSpecConstantOp.
-    - An `spv.mlir.yield` instruction as the terminator.
+    - An `spirv.mlir.yield` instruction as the terminator.
 
     The following SPIR-V instructions are valid:
     - OpSConvert,
@@ -736,10 +736,10 @@ def SPV_SpecConstantOperationOp : SPV_Op<"SpecConstantOperation", [
 
     #### Example:
     ```mlir
-    %0 = spv.Constant 1: i32
-    %1 = spv.Constant 1: i32
+    %0 = spirv.Constant 1: i32
+    %1 = spirv.Constant 1: i32
 
-    %2 = spv.SpecConstantOperation wraps "spv.IAdd"(%0, %1) : (i32, i32) -> i32
+    %2 = spirv.SpecConstantOperation wraps "spirv.IAdd"(%0, %1) : (i32, i32) -> i32
     ```
   }];
 
@@ -762,25 +762,25 @@ def SPV_SpecConstantOperationOp : SPV_Op<"SpecConstantOperation", [
 def SPV_YieldOp : SPV_Op<"mlir.yield", [
     HasParent<"SpecConstantOperationOp">, NoSideEffect, Terminator]> {
   let summary = [{
-    Yields the result computed in `spv.SpecConstantOperation`'s
+    Yields the result computed in `spirv.SpecConstantOperation`'s
     region back to the parent op.
   }];
 
   let description = [{
     This op is a special terminator whose only purpose is to terminate
-    an `spv.SpecConstantOperation`'s enclosed region. It accepts a
+    an `spirv.SpecConstantOperation`'s enclosed region. It accepts a
     single operand produced by the preceeding (and only other) instruction
     in its parent block (see SPV_SpecConstantOperation for further
     details). This op has no corresponding SPIR-V instruction.
 
     ```
-    spv.mlir.yield ::= `spv.mlir.yield` ssa-id : spirv-type
+    spirv.mlir.yield ::= `spirv.mlir.yield` ssa-id : spirv-type
     ```
 
     #### Example:
     ```mlir
     %0 = ... (some op supported by SPIR-V OpSpecConstantOp)
-    spv.mlir.yield %0
+    spirv.mlir.yield %0
     ```
   }];
 

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVTypes.h b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVTypes.h
index d9737e4c2c57..8d48bc332e4a 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVTypes.h
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/SPIRVTypes.h
@@ -271,7 +271,7 @@ class SampledImageType
 ///
 /// would be represented in MLIR as:
 ///
-/// !spv.struct<A, (!spv.ptr<!spv.struct<A>, Generic>)>
+/// !spirv.struct<A, (!spirv.ptr<!spirv.struct<A>, Generic>)>
 ///
 /// In the above, expressing recursive struct types is accomplished by giving a
 /// recursive struct a unique identified and using that identifier in the struct

diff  --git a/mlir/include/mlir/Dialect/SPIRV/IR/TargetAndABI.h b/mlir/include/mlir/Dialect/SPIRV/IR/TargetAndABI.h
index 146ca7d38046..fbdc16abef1c 100644
--- a/mlir/include/mlir/Dialect/SPIRV/IR/TargetAndABI.h
+++ b/mlir/include/mlir/Dialect/SPIRV/IR/TargetAndABI.h
@@ -79,7 +79,7 @@ InterfaceVarABIAttr getInterfaceVarABIAttr(unsigned descriptorSet,
                                            MLIRContext *context);
 
 /// Returns whether the given SPIR-V target (described by TargetEnvAttr) needs
-/// ABI attributes for interface variables (spv.interface_var_abi).
+/// ABI attributes for interface variables (spirv.interface_var_abi).
 bool needsInterfaceVarABIAttrs(TargetEnvAttr targetAttr);
 
 /// Returns the attribute name for specifying entry point information.

diff  --git a/mlir/include/mlir/Dialect/SPIRV/Linking/ModuleCombiner.h b/mlir/include/mlir/Dialect/SPIRV/Linking/ModuleCombiner.h
index 833da17452f1..33f26c159501 100644
--- a/mlir/include/mlir/Dialect/SPIRV/Linking/ModuleCombiner.h
+++ b/mlir/include/mlir/Dialect/SPIRV/Linking/ModuleCombiner.h
@@ -50,18 +50,18 @@ using SymbolRenameListener = function_ref<void(
 /// For the conflict resolution phase, the following rules are employed to
 /// resolve such conflicts:
 ///
-/// - If 2 spv.func's have the same symbol name, then rename one of the
+/// - If 2 spirv.func's have the same symbol name, then rename one of the
 ///   functions.
-/// - If an spv.func and another op have the same symbol name, then rename the
+/// - If an spirv.func and another op have the same symbol name, then rename the
 ///   other symbol.
-/// - If none of the 2 conflicting ops are spv.func, then rename either.
+/// - If none of the 2 conflicting ops are spirv.func, then rename either.
 ///
 /// For deduplication, the following 3 cases are taken into consideration:
 ///
-/// - If 2 spv.GlobalVariable's have either the same descriptor set + binding
+/// - If 2 spirv.GlobalVariable's have either the same descriptor set + binding
 ///   or the same build_in attribute value, then replace one of them using the
 ///   other.
-/// - If 2 spv.SpecConstant's have the same spec_id attribute value, then
+/// - If 2 spirv.SpecConstant's have the same spec_id attribute value, then
 ///   replace one of them using the other.
 /// - Deduplicating functions are not supported right now.
 ///

diff  --git a/mlir/include/mlir/Dialect/SPIRV/Transforms/Passes.h b/mlir/include/mlir/Dialect/SPIRV/Transforms/Passes.h
index a50f680a2e5c..44bca8871a88 100644
--- a/mlir/include/mlir/Dialect/SPIRV/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/SPIRV/Transforms/Passes.h
@@ -48,27 +48,28 @@ std::unique_ptr<OperationPass<mlir::ModuleOp>>
 createDecorateSPIRVCompositeTypeLayoutPass();
 
 /// Creates an operation pass that deduces and attaches the minimal version/
-/// capabilities/extensions requirements for spv.module ops.
-/// For each spv.module op, this pass requires a `spv.target_env` attribute on
-/// it or an enclosing module-like op to drive the deduction. The reason is
+/// capabilities/extensions requirements for spirv.module ops.
+/// For each spirv.module op, this pass requires a `spirv.target_env` attribute
+/// on it or an enclosing module-like op to drive the deduction. The reason is
 /// that an op can be enabled by multiple extensions/capabilities. So we need
-/// to know which one to pick. `spv.target_env` gives the hard limit as for
+/// to know which one to pick. `spirv.target_env` gives the hard limit as for
 /// what the target environment can support; this pass deduces what are
-/// actually needed for a specific spv.module op.
+/// actually needed for a specific spirv.module op.
 std::unique_ptr<OperationPass<spirv::ModuleOp>>
 createUpdateVersionCapabilityExtensionPass();
 
 /// Creates an operation pass that lowers the ABI attributes specified during
 /// SPIR-V Lowering. Specifically,
 /// 1. Creates the global variables for arguments of entry point function using
-///    the specification in the `spv.interface_var_abi` attribute for each
+///    the specification in the `spirv.interface_var_abi` attribute for each
 ///    argument.
 /// 2. Inserts the EntryPointOp and the ExecutionModeOp for entry point
-///    functions using the specification in the `spv.entry_point_abi` attribute.
+///    functions using the specification in the `spirv.entry_point_abi`
+///    attribute.
 std::unique_ptr<OperationPass<spirv::ModuleOp>> createLowerABIAttributesPass();
 
 /// Creates an operation pass that rewrites sequential chains of
-/// spv.CompositeInsert into spv.CompositeConstruct.
+/// spirv.CompositeInsert into spirv.CompositeConstruct.
 std::unique_ptr<OperationPass<spirv::ModuleOp>> createRewriteInsertsPass();
 
 /// Creates an operation pass that unifies access of multiple aliased resources

diff  --git a/mlir/include/mlir/Dialect/SPIRV/Transforms/Passes.td b/mlir/include/mlir/Dialect/SPIRV/Transforms/Passes.td
index cbea46ef3921..5cd3dacbc8f5 100644
--- a/mlir/include/mlir/Dialect/SPIRV/Transforms/Passes.td
+++ b/mlir/include/mlir/Dialect/SPIRV/Transforms/Passes.td
@@ -28,8 +28,8 @@ def SPIRVLowerABIAttributes : Pass<"spirv-lower-abi-attrs", "spirv::ModuleOp"> {
 }
 
 def SPIRVRewriteInsertsPass : Pass<"spirv-rewrite-inserts", "spirv::ModuleOp"> {
-  let summary = "Rewrite sequential chains of spv.CompositeInsert operations into "
-                "spv.CompositeConstruct operations";
+  let summary = "Rewrite sequential chains of spirv.CompositeInsert operations into "
+                "spirv.CompositeConstruct operations";
   let constructor = "mlir::spirv::createRewriteInsertsPass()";
 }
 
@@ -42,7 +42,7 @@ def SPIRVUnifyAliasedResourcePass
 
 def SPIRVUpdateVCE : Pass<"spirv-update-vce", "spirv::ModuleOp"> {
   let summary = "Deduce and attach minimal (version, capabilities, extensions) "
-                "requirements to spv.module ops";
+                "requirements to spirv.module ops";
   let constructor = "mlir::spirv::createUpdateVersionCapabilityExtensionPass()";
 }
 

diff  --git a/mlir/include/mlir/IR/OpAsmInterface.td b/mlir/include/mlir/IR/OpAsmInterface.td
index 73e1f4fad6db..8b8d70ead2e3 100644
--- a/mlir/include/mlir/IR/OpAsmInterface.td
+++ b/mlir/include/mlir/IR/OpAsmInterface.td
@@ -100,7 +100,7 @@ def OpAsmOpInterface : OpInterface<"OpAsmOpInterface"> {
       Return the default dialect used when printing/parsing operations in
       regions nested under this operation. This allows for eliding the dialect
       prefix from the operation name, for example it would be possible to omit
-      the `spv.` prefix from all operations within a SpirV module if this method
+      the `spirv.` prefix from all operations within a SpirV module if this method
       returned `spv`. The default implementation returns an empty string which
       is ignored.
       }],

diff  --git a/mlir/lib/Conversion/ArithmeticToSPIRV/ArithmeticToSPIRV.cpp b/mlir/lib/Conversion/ArithmeticToSPIRV/ArithmeticToSPIRV.cpp
index c6c3f2b366ea..1d151224075c 100644
--- a/mlir/lib/Conversion/ArithmeticToSPIRV/ArithmeticToSPIRV.cpp
+++ b/mlir/lib/Conversion/ArithmeticToSPIRV/ArithmeticToSPIRV.cpp
@@ -35,7 +35,7 @@ using namespace mlir;
 
 namespace {
 
-/// Converts composite arith.constant operation to spv.Constant.
+/// Converts composite arith.constant operation to spirv.Constant.
 struct ConstantCompositeOpPattern final
     : public OpConversionPattern<arith::ConstantOp> {
   using OpConversionPattern<arith::ConstantOp>::OpConversionPattern;
@@ -45,7 +45,7 @@ struct ConstantCompositeOpPattern final
                   ConversionPatternRewriter &rewriter) const override;
 };
 
-/// Converts scalar arith.constant operation to spv.Constant.
+/// Converts scalar arith.constant operation to spirv.Constant.
 struct ConstantScalarOpPattern final
     : public OpConversionPattern<arith::ConstantOp> {
   using OpConversionPattern<arith::ConstantOp>::OpConversionPattern;
@@ -58,7 +58,7 @@ struct ConstantScalarOpPattern final
 /// Converts arith.remsi to GLSL SPIR-V ops.
 ///
 /// This cannot be merged into the template unary/binary pattern due to Vulkan
-/// restrictions over spv.SRem and spv.SMod.
+/// restrictions over spirv.SRem and spirv.SMod.
 struct RemSIOpGLPattern final : public OpConversionPattern<arith::RemSIOp> {
   using OpConversionPattern<arith::RemSIOp>::OpConversionPattern;
 
@@ -108,8 +108,8 @@ struct XOrIOpBooleanPattern final : public OpConversionPattern<arith::XOrIOp> {
                   ConversionPatternRewriter &rewriter) const override;
 };
 
-/// Converts arith.uitofp to spv.Select if the type of source is i1 or vector of
-/// i1.
+/// Converts arith.uitofp to spirv.Select if the type of source is i1 or vector
+/// of i1.
 struct UIToFPI1Pattern final : public OpConversionPattern<arith::UIToFPOp> {
   using OpConversionPattern<arith::UIToFPOp>::OpConversionPattern;
 
@@ -118,8 +118,8 @@ struct UIToFPI1Pattern final : public OpConversionPattern<arith::UIToFPOp> {
                   ConversionPatternRewriter &rewriter) const override;
 };
 
-/// Converts arith.extui to spv.Select if the type of source is i1 or vector of
-/// i1.
+/// Converts arith.extui to spirv.Select if the type of source is i1 or vector
+/// of i1.
 struct ExtUII1Pattern final : public OpConversionPattern<arith::ExtUIOp> {
   using OpConversionPattern<arith::ExtUIOp>::OpConversionPattern;
 
@@ -128,8 +128,8 @@ struct ExtUII1Pattern final : public OpConversionPattern<arith::ExtUIOp> {
                   ConversionPatternRewriter &rewriter) const override;
 };
 
-/// Converts arith.trunci to spv.Select if the type of result is i1 or vector of
-/// i1.
+/// Converts arith.trunci to spirv.Select if the type of result is i1 or vector
+/// of i1.
 struct TruncII1Pattern final : public OpConversionPattern<arith::TruncIOp> {
   using OpConversionPattern<arith::TruncIOp>::OpConversionPattern;
 
@@ -200,7 +200,7 @@ class CmpFOpNanNonePattern final : public OpConversionPattern<arith::CmpFOp> {
                   ConversionPatternRewriter &rewriter) const override;
 };
 
-/// Converts arith.addui_carry to spv.IAddCarry.
+/// Converts arith.addui_carry to spirv.IAddCarry.
 class AddICarryOpPattern final
     : public OpConversionPattern<arith::AddUICarryOp> {
 public:
@@ -210,7 +210,7 @@ class AddICarryOpPattern final
                   ConversionPatternRewriter &rewriter) const override;
 };
 
-/// Converts arith.select to spv.Select.
+/// Converts arith.select to spirv.Select.
 class SelectOpPattern final : public OpConversionPattern<arith::SelectOp> {
 public:
   using OpConversionPattern<arith::SelectOp>::OpConversionPattern;
@@ -219,7 +219,7 @@ class SelectOpPattern final : public OpConversionPattern<arith::SelectOp> {
                   ConversionPatternRewriter &rewriter) const override;
 };
 
-/// Converts arith.maxf to spv.GL.FMax or spv.CL.fmax.
+/// Converts arith.maxf to spirv.GL.FMax or spirv.CL.fmax.
 template <typename Op, typename SPIRVOp>
 class MinMaxFOpPattern final : public OpConversionPattern<Op> {
 public:
@@ -478,8 +478,8 @@ LogicalResult ConstantScalarOpPattern::matchAndRewrite(
 ///
 /// Note that this is needed for Vulkan. Per the Vulkan's SPIR-V environment
 /// spec, "for the OpSRem and OpSMod instructions, if either operand is negative
-/// the result is undefined."  So we cannot directly use spv.SRem/spv.SMod
-/// if either operand can be negative. Emulate it via spv.UMod.
+/// the result is undefined."  So we cannot directly use spirv.SRem/spirv.SMod
+/// if either operand can be negative. Emulate it via spirv.UMod.
 template <typename SignedAbsOp>
 static Value emulateSignedRemainder(Location loc, Value lhs, Value rhs,
                                     Value signOperand, OpBuilder &builder) {
@@ -488,7 +488,7 @@ static Value emulateSignedRemainder(Location loc, Value lhs, Value rhs,
 
   Type type = lhs.getType();
 
-  // Calculate the remainder with spv.UMod.
+  // Calculate the remainder with spirv.UMod.
   Value lhsAbs = builder.create<SignedAbsOp>(loc, type, lhs);
   Value rhsAbs = builder.create<SignedAbsOp>(loc, type, rhs);
   Value abs = builder.create<spirv::UModOp>(loc, lhsAbs, rhsAbs);
@@ -926,10 +926,10 @@ LogicalResult MinMaxFOpPattern<Op, SPIRVOp>::matchAndRewrite(
 
   // arith.maxf/minf:
   //   "if one of the arguments is NaN, then the result is also NaN."
-  // spv.GL.FMax/FMin
+  // spirv.GL.FMax/FMin
   //   "which operand is the result is undefined if one of the operands
   //   is a NaN."
-  // spv.CL.fmax/fmin:
+  // spirv.CL.fmax/fmin:
   //   "If one argument is a NaN, Fmin returns the other argument."
 
   Location loc = op.getLoc();

diff  --git a/mlir/lib/Conversion/ControlFlowToSPIRV/ControlFlowToSPIRV.cpp b/mlir/lib/Conversion/ControlFlowToSPIRV/ControlFlowToSPIRV.cpp
index ffa618a02b3c..6787a5ccd3a4 100644
--- a/mlir/lib/Conversion/ControlFlowToSPIRV/ControlFlowToSPIRV.cpp
+++ b/mlir/lib/Conversion/ControlFlowToSPIRV/ControlFlowToSPIRV.cpp
@@ -31,7 +31,7 @@ using namespace mlir;
 
 namespace {
 
-/// Converts cf.br to spv.Branch.
+/// Converts cf.br to spirv.Branch.
 struct BranchOpPattern final : public OpConversionPattern<cf::BranchOp> {
   using OpConversionPattern<cf::BranchOp>::OpConversionPattern;
 
@@ -44,7 +44,7 @@ struct BranchOpPattern final : public OpConversionPattern<cf::BranchOp> {
   }
 };
 
-/// Converts cf.cond_br to spv.BranchConditional.
+/// Converts cf.cond_br to spirv.BranchConditional.
 struct CondBranchOpPattern final
     : public OpConversionPattern<cf::CondBranchOp> {
   using OpConversionPattern<cf::CondBranchOp>::OpConversionPattern;

diff  --git a/mlir/lib/Conversion/FuncToSPIRV/FuncToSPIRV.cpp b/mlir/lib/Conversion/FuncToSPIRV/FuncToSPIRV.cpp
index 1a5606b738dc..a5755514b3e5 100644
--- a/mlir/lib/Conversion/FuncToSPIRV/FuncToSPIRV.cpp
+++ b/mlir/lib/Conversion/FuncToSPIRV/FuncToSPIRV.cpp
@@ -35,7 +35,7 @@ using namespace mlir;
 
 namespace {
 
-/// Converts func.return to spv.Return.
+/// Converts func.return to spirv.Return.
 class ReturnOpPattern final : public OpConversionPattern<func::ReturnOp> {
 public:
   using OpConversionPattern<func::ReturnOp>::OpConversionPattern;
@@ -56,7 +56,7 @@ class ReturnOpPattern final : public OpConversionPattern<func::ReturnOp> {
   }
 };
 
-/// Converts func.call to spv.FunctionCall.
+/// Converts func.call to spirv.FunctionCall.
 class CallOpPattern final : public OpConversionPattern<func::CallOp> {
 public:
   using OpConversionPattern<func::CallOp>::OpConversionPattern;
@@ -64,7 +64,7 @@ class CallOpPattern final : public OpConversionPattern<func::CallOp> {
   LogicalResult
   matchAndRewrite(func::CallOp callOp, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    // multiple results func was not converted to spv.func
+    // multiple results func was not converted to spirv.func
     if (callOp.getNumResults() > 1)
       return failure();
     if (callOp.getNumResults() == 1) {

diff  --git a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
index e1fa1fefbf24..faf9a0baafa2 100644
--- a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
+++ b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRV.cpp
@@ -53,7 +53,7 @@ class SingleDimLaunchConfigConversion : public OpConversionPattern<SourceOp> {
 
 /// This is separate because in Vulkan workgroup size is exposed to shaders via
 /// a constant with WorkgroupSize decoration. So here we cannot generate a
-/// builtin variable; instead the information in the `spv.entry_point_abi`
+/// builtin variable; instead the information in the `spirv.entry_point_abi`
 /// attribute on the surrounding FuncOp is used to replace the gpu::BlockDimOp.
 class WorkGroupSizeConversion : public OpConversionPattern<gpu::BlockDimOp> {
 public:
@@ -65,7 +65,7 @@ class WorkGroupSizeConversion : public OpConversionPattern<gpu::BlockDimOp> {
                   ConversionPatternRewriter &rewriter) const override;
 };
 
-/// Pattern to convert a kernel function in GPU dialect within a spv.module.
+/// Pattern to convert a kernel function in GPU dialect within a spirv.module.
 class GPUFuncOpConversion final : public OpConversionPattern<gpu::GPUFuncOp> {
 public:
   using OpConversionPattern<gpu::GPUFuncOp>::OpConversionPattern;
@@ -78,7 +78,7 @@ class GPUFuncOpConversion final : public OpConversionPattern<gpu::GPUFuncOp> {
   SmallVector<int32_t, 3> workGroupSizeAsInt32;
 };
 
-/// Pattern to convert a gpu.module to a spv.module.
+/// Pattern to convert a gpu.module to a spirv.module.
 class GPUModuleConversion final : public OpConversionPattern<gpu::GPUModuleOp> {
 public:
   using OpConversionPattern<gpu::GPUModuleOp>::OpConversionPattern;
@@ -112,7 +112,7 @@ class GPUReturnOpConversion final : public OpConversionPattern<gpu::ReturnOp> {
                   ConversionPatternRewriter &rewriter) const override;
 };
 
-/// Pattern to convert a gpu.barrier op into a spv.ControlBarrier op.
+/// Pattern to convert a gpu.barrier op into a spirv.ControlBarrier op.
 class GPUBarrierConversion final : public OpConversionPattern<gpu::BarrierOp> {
 public:
   using OpConversionPattern::OpConversionPattern;
@@ -122,7 +122,7 @@ class GPUBarrierConversion final : public OpConversionPattern<gpu::BarrierOp> {
                   ConversionPatternRewriter &rewriter) const override;
 };
 
-/// Pattern to convert a gpu.shuffle op into a spv.GroupNonUniformShuffle op.
+/// Pattern to convert a gpu.shuffle op into a spirv.GroupNonUniformShuffle op.
 class GPUShuffleConversion final : public OpConversionPattern<gpu::ShuffleOp> {
 public:
   using OpConversionPattern::OpConversionPattern;
@@ -249,8 +249,8 @@ lowerAsEntryFunction(gpu::GPUFuncOp funcOp, TypeConverter &typeConverter,
   return newFuncOp;
 }
 
-/// Populates `argABI` with spv.interface_var_abi attributes for lowering
-/// gpu.func to spv.func if no arguments have the attributes set
+/// Populates `argABI` with spirv.interface_var_abi attributes for lowering
+/// gpu.func to spirv.func if no arguments have the attributes set
 /// already. Returns failure if any argument has the ABI attribute set already.
 static LogicalResult
 getDefaultABIAttrs(MLIRContext *context, gpu::GPUFuncOp funcOp,
@@ -288,7 +288,7 @@ LogicalResult GPUFuncOpConversion::matchAndRewrite(
           argIndex, spirv::getInterfaceVarABIAttrName());
       if (!abiAttr) {
         funcOp.emitRemark(
-            "match failure: missing 'spv.interface_var_abi' attribute at "
+            "match failure: missing 'spirv.interface_var_abi' attribute at "
             "argument ")
             << argIndex;
         return failure();
@@ -299,7 +299,8 @@ LogicalResult GPUFuncOpConversion::matchAndRewrite(
 
   auto entryPointAttr = spirv::lookupEntryPointABI(funcOp);
   if (!entryPointAttr) {
-    funcOp.emitRemark("match failure: missing 'spv.entry_point_abi' attribute");
+    funcOp.emitRemark(
+        "match failure: missing 'spirv.entry_point_abi' attribute");
     return failure();
   }
   spirv::FuncOp newFuncOp = lowerAsEntryFunction(
@@ -323,7 +324,7 @@ LogicalResult GPUModuleConversion::matchAndRewrite(
   FailureOr<spirv::MemoryModel> memoryModel = spirv::getMemoryModel(targetEnv);
   if (failed(memoryModel))
     return moduleOp.emitRemark("match failure: could not selected memory model "
-                               "based on 'spv.target_env'");
+                               "based on 'spirv.target_env'");
 
   // Add a keyword to the module name to avoid symbolic conflict.
   std::string spvModuleName = (kSPIRVModule + moduleOp.getName()).str();
@@ -335,7 +336,7 @@ LogicalResult GPUModuleConversion::matchAndRewrite(
   Region &spvModuleRegion = spvModule.getRegion();
   rewriter.inlineRegionBefore(moduleOp.getBodyRegion(), spvModuleRegion,
                               spvModuleRegion.begin());
-  // The spv.module build method adds a block. Remove that.
+  // The spirv.module build method adds a block. Remove that.
   rewriter.eraseBlock(&spvModuleRegion.back());
   rewriter.eraseOp(moduleOp);
   return success();

diff --git a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp
index b880c65ff847..c38a71b77a2d 100644
--- a/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp
+++ b/mlir/lib/Conversion/GPUToSPIRV/GPUToSPIRVPass.cpp
@@ -7,7 +7,7 @@
 //===----------------------------------------------------------------------===//
 //
 // This file implements a pass to convert a kernel function in the GPU Dialect
-// into a spv.module operation.
+// into a spirv.module operation.
 //
 //===----------------------------------------------------------------------===//
 

diff --git a/mlir/lib/Conversion/GPUToVulkan/ConvertGPULaunchFuncToVulkanLaunchFunc.cpp b/mlir/lib/Conversion/GPUToVulkan/ConvertGPULaunchFuncToVulkanLaunchFunc.cpp
index 8f084958e11f..b6aff1640749 100644
--- a/mlir/lib/Conversion/GPUToVulkan/ConvertGPULaunchFuncToVulkanLaunchFunc.cpp
+++ b/mlir/lib/Conversion/GPUToVulkan/ConvertGPULaunchFuncToVulkanLaunchFunc.cpp
@@ -140,7 +140,7 @@ LogicalResult ConvertGpuLaunchFuncToVulkanLaunchFunc::createBinaryShader(
   SmallVector<uint32_t, 0> binary;
   for (auto spirvModule : module.getOps<spirv::ModuleOp>()) {
     if (done)
-      return spirvModule.emitError("should only contain one 'spv.module' op");
+      return spirvModule.emitError("should only contain one 'spirv.module' op");
     done = true;
 
     if (failed(spirv::serialize(spirvModule, binary)))

diff --git a/mlir/lib/Conversion/LinalgToSPIRV/LinalgToSPIRV.cpp b/mlir/lib/Conversion/LinalgToSPIRV/LinalgToSPIRV.cpp
index 3534ba74e8c4..305ea9c0f32f 100644
--- a/mlir/lib/Conversion/LinalgToSPIRV/LinalgToSPIRV.cpp
+++ b/mlir/lib/Conversion/LinalgToSPIRV/LinalgToSPIRV.cpp
@@ -172,11 +172,11 @@ LogicalResult SingleWorkgroupReduction::matchAndRewrite(
                            zeroIndices, loc, rewriter);
 
   // Write out the final reduction result. This should be only conducted by one
-  // invocation. We use spv.GroupNonUniformElect to find the invocation with the
-  // lowest ID.
+  // invocation. We use spirv.GroupNonUniformElect to find the invocation with
+  // the lowest ID.
   //
   // ```
-  // if (spv.GroupNonUniformElect) { output = ... }
+  // if (spirv.GroupNonUniformElect) { output = ... }
   // ```
 
   Value condition = rewriter.create<spirv::GroupNonUniformElectOp>(

diff --git a/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp b/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp
index 4c0736d121d4..7f31488ad2b5 100644
--- a/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp
+++ b/mlir/lib/Conversion/MemRefToSPIRV/MemRefToSPIRV.cpp
@@ -55,8 +55,8 @@ static Value getOffsetForBitwidth(Location loc, Value srcIdx, int sourceBits,
 /// supported. During conversion if a memref of an unsupported type is used,
 /// load/stores to this memref need to be modified to use a supported higher
 /// bitwidth `targetBits` and extracting the required bits. For an accessing a
-/// 1D array (spv.array or spv.rt_array), the last index is modified to load the
-/// bits needed. The extraction of the actual bits needed are handled
+/// 1D array (spirv.array or spirv.rt_array), the last index is modified to load
+/// the bits needed. The extraction of the actual bits needed are handled
 /// separately. Note that this only works for a 1-D tensor.
 static Value adjustAccessChainForBitwidth(SPIRVTypeConverter &typeConverter,
                                           spirv::AccessChainOp op,
@@ -170,8 +170,8 @@ class AllocaOpPattern final : public OpConversionPattern<memref::AllocaOp> {
 
 /// Converts an allocation operation to SPIR-V. Currently only supports lowering
 /// to Workgroup memory when the size is constant.  Note that this pattern needs
-/// to be applied in a pass that runs at least at spv.module scope since it wil
-/// ladd global variables into the spv.module.
+/// to be applied in a pass that runs at least at spirv.module scope since it
+/// will add global variables into the spirv.module.
 class AllocOpPattern final : public OpConversionPattern<memref::AllocOp> {
 public:
   using OpConversionPattern<memref::AllocOp>::OpConversionPattern;
@@ -192,7 +192,7 @@ class DeallocOpPattern final : public OpConversionPattern<memref::DeallocOp> {
                   ConversionPatternRewriter &rewriter) const override;
 };
 
-/// Converts memref.load to spv.Load + spv.AccessChain on integers.
+/// Converts memref.load to spirv.Load + spirv.AccessChain on integers.
 class IntLoadOpPattern final : public OpConversionPattern<memref::LoadOp> {
 public:
   using OpConversionPattern<memref::LoadOp>::OpConversionPattern;
@@ -202,7 +202,7 @@ class IntLoadOpPattern final : public OpConversionPattern<memref::LoadOp> {
                   ConversionPatternRewriter &rewriter) const override;
 };
 
-/// Converts memref.load to spv.Load + spv.AccessChain.
+/// Converts memref.load to spirv.Load + spirv.AccessChain.
 class LoadOpPattern final : public OpConversionPattern<memref::LoadOp> {
 public:
   using OpConversionPattern<memref::LoadOp>::OpConversionPattern;
@@ -212,7 +212,7 @@ class LoadOpPattern final : public OpConversionPattern<memref::LoadOp> {
                   ConversionPatternRewriter &rewriter) const override;
 };
 
-/// Converts memref.store to spv.Store on integers.
+/// Converts memref.store to spirv.Store on integers.
 class IntStoreOpPattern final : public OpConversionPattern<memref::StoreOp> {
 public:
   using OpConversionPattern<memref::StoreOp>::OpConversionPattern;
@@ -222,7 +222,7 @@ class IntStoreOpPattern final : public OpConversionPattern<memref::StoreOp> {
                   ConversionPatternRewriter &rewriter) const override;
 };
 
-/// Converts memref.store to spv.Store.
+/// Converts memref.store to spirv.Store.
 class StoreOpPattern final : public OpConversionPattern<memref::StoreOp> {
 public:
   using OpConversionPattern<memref::StoreOp>::OpConversionPattern;
@@ -267,7 +267,7 @@ AllocOpPattern::matchAndRewrite(memref::AllocOp operation, OpAdaptor adaptor,
   // Get the SPIR-V type for the allocation.
   Type spirvType = getTypeConverter()->convertType(allocType);
 
-  // Insert spv.GlobalVariable for this allocation.
+  // Insert spirv.GlobalVariable for this allocation.
   Operation *parent =
       SymbolTable::getNearestSymbolTable(operation->getParentOp());
   if (!parent)
@@ -360,7 +360,7 @@ IntLoadOpPattern::matchAndRewrite(memref::LoadOp loadOp, OpAdaptor adaptor,
   }
 
   // Bitcasting is currently unsupported for Kernel capability /
-  // spv.PtrAccessChain.
+  // spirv.PtrAccessChain.
   if (typeConverter.allows(spirv::Capability::Kernel))
     return failure();
 
@@ -488,7 +488,7 @@ IntStoreOpPattern::matchAndRewrite(memref::StoreOp storeOp, OpAdaptor adaptor,
   }
 
   // Bitcasting is currently unsupported for Kernel capability /
-  // spv.PtrAccessChain.
+  // spirv.PtrAccessChain.
   if (typeConverter.allows(spirv::Capability::Kernel))
     return failure();
 

diff --git a/mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp b/mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp
index 72ccb3a82518..75d0b8edca15 100644
--- a/mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp
+++ b/mlir/lib/Conversion/SCFToSPIRV/SCFToSPIRV.cpp
@@ -26,9 +26,9 @@ using namespace mlir;
 
 namespace mlir {
 struct ScfToSPIRVContextImpl {
-  // Map between the spirv region control flow operation (spv.mlir.loop or
-  // spv.mlir.selection) to the VariableOp created to store the region results.
-  // The order of the VariableOp matches the order of the results.
+  // Map between the spirv region control flow operation (spirv.mlir.loop or
+  // spirv.mlir.selection) to the VariableOp created to store the region
+  // results. The order of the VariableOp matches the order of the results.
   DenseMap<Operation *, SmallVector<spirv::VariableOp, 8>> outputVars;
 };
 } // namespace mlir
@@ -120,9 +120,9 @@ class WhileOpConversion final : public SCFToSPIRVPattern<scf::WhileOp> {
 
 /// Helper function to replaces SCF op outputs with SPIR-V variable loads.
 /// We create VariableOp to handle the results value of the control flow region.
-/// spv.mlir.loop/spv.mlir.selection currently don't yield value. Right after
-/// the loop we load the value from the allocation and use it as the SCF op
-/// result.
+/// spirv.mlir.loop/spirv.mlir.selection currently don't yield value. Right
+/// after the loop we load the value from the allocation and use it as the SCF
+/// op result.
 template <typename ScfOp, typename OpTy>
 static void replaceSCFOutputValue(ScfOp scfOp, OpTy newOp,
                                   ConversionPatternRewriter &rewriter,
@@ -248,7 +248,7 @@ IfOpConversion::matchAndRewrite(scf::IfOp ifOp, OpAdaptor adaptor,
   // subsequently converges.
   auto loc = ifOp.getLoc();
 
-  // Create `spv.selection` operation, selection header block and merge block.
+  // Create `spirv.selection` operation, selection header block and merge block.
   auto selectionOp =
       rewriter.create<spirv::SelectionOp>(loc, spirv::SelectionControl::None);
   auto *mergeBlock =
@@ -277,7 +277,7 @@ IfOpConversion::matchAndRewrite(scf::IfOp ifOp, OpAdaptor adaptor,
     rewriter.inlineRegionBefore(elseRegion, mergeBlock);
   }
 
-  // Create a `spv.BranchConditional` operation for selection header block.
+  // Create a `spirv.BranchConditional` operation for selection header block.
   rewriter.setInsertionPointToEnd(selectionHeaderBlock);
   rewriter.create<spirv::BranchConditionalOp>(loc, adaptor.getCondition(),
                                               thenBlock, ArrayRef<Value>(),

diff --git a/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp b/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
index e97774832f56..3957ff4558ee 100644
--- a/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
@@ -105,7 +105,7 @@ static bool hasDescriptorSetAndBinding(spirv::GlobalVariableOp op) {
 
 /// Fills `globalVariableMap` with SPIR-V global variables that represent kernel
 /// arguments from the given SPIR-V module. We assume that the module contains a
-/// single entry point function. Hence, all `spv.GlobalVariable`s with a bind
+/// single entry point function. Hence, all `spirv.GlobalVariable`s with a bind
 /// attribute are kernel arguments.
 static LogicalResult getKernelGlobalVariables(
     spirv::ModuleOp module,

diff --git a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
index 7a3c87342643..f2e93124c729 100644
--- a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVM.cpp
@@ -217,7 +217,7 @@ static Value createI32ConstantOf(Location loc, PatternRewriter &rewriter,
       rewriter.getIntegerAttr(rewriter.getI32Type(), value));
 }
 
-/// Utility for `spv.Load` and `spv.Store` conversion.
+/// Utility for `spirv.Load` and `spirv.Store` conversion.
 static LogicalResult replaceWithLoadOrStore(Operation *op, ValueRange operands,
                                             ConversionPatternRewriter &rewriter,
                                             LLVMTypeConverter &typeConverter,
@@ -551,8 +551,8 @@ class BranchConditionalConversionPattern
   }
 };
 
-/// Converts `spv.getCompositeExtract` to `llvm.extractvalue` if the container type
-/// is an aggregate type (struct or array). Otherwise, converts to
+/// Converts `spirv.getCompositeExtract` to `llvm.extractvalue` if the container
+/// type is an aggregate type (struct or array). Otherwise, converts to
 /// `llvm.extractelement` that operates on vectors.
 class CompositeExtractPattern
     : public SPIRVToLLVMConversion<spirv::CompositeExtractOp> {
@@ -582,8 +582,8 @@ class CompositeExtractPattern
   }
 };
 
-/// Converts `spv.getCompositeInsert` to `llvm.insertvalue` if the container type
-/// is an aggregate type (struct or array). Otherwise, converts to
+/// Converts `spirv.getCompositeInsert` to `llvm.insertvalue` if the container
+/// type is an aggregate type (struct or array). Otherwise, converts to
 /// `llvm.insertelement` that operates on vectors.
 class CompositeInsertPattern
     : public SPIRVToLLVMConversion<spirv::CompositeInsertOp> {
@@ -633,7 +633,7 @@ class DirectConversionPattern : public SPIRVToLLVMConversion<SPIRVOp> {
   }
 };
 
-/// Converts `spv.ExecutionMode` into a global struct constant that holds
+/// Converts `spirv.ExecutionMode` into a global struct constant that holds
 /// execution mode information.
 class ExecutionModePattern
     : public SPIRVToLLVMConversion<spirv::ExecutionModeOp> {
@@ -708,9 +708,9 @@ class ExecutionModePattern
   }
 };
 
-/// Converts `spv.GlobalVariable` to `llvm.mlir.global`. Note that SPIR-V global
-/// returns a pointer, whereas in LLVM dialect the global holds an actual value.
-/// This difference is handled by `spv.mlir.addressof` and
+/// Converts `spirv.GlobalVariable` to `llvm.mlir.global`. Note that SPIR-V
+/// global returns a pointer, whereas in LLVM dialect the global holds an actual
+/// value. This difference is handled by `spirv.mlir.addressof` and
 /// `llvm.mlir.addressof`ops that both return a pointer.
 class GlobalVariablePattern
     : public SPIRVToLLVMConversion<spirv::GlobalVariableOp> {
@@ -887,7 +887,7 @@ class InverseSqrtPattern
   }
 };
 
-/// Converts `spv.Load` and `spv.Store` to LLVM dialect.
+/// Converts `spirv.Load` and `spirv.Store` to LLVM dialect.
 template <typename SPIRVOp>
 class LoadStorePattern : public SPIRVToLLVMConversion<SPIRVOp> {
 public:
@@ -923,7 +923,7 @@ class LoadStorePattern : public SPIRVToLLVMConversion<SPIRVOp> {
   }
 };
 
-/// Converts `spv.Not` and `spv.LogicalNot` into LLVM dialect.
+/// Converts `spirv.Not` and `spirv.LogicalNot` into LLVM dialect.
 template <typename SPIRVOp>
 class NotPattern : public SPIRVToLLVMConversion<SPIRVOp> {
 public:
@@ -991,12 +991,12 @@ class ReturnValuePattern : public SPIRVToLLVMConversion<spirv::ReturnValueOp> {
   }
 };
 
-/// Converts `spv.mlir.loop` to LLVM dialect. All blocks within selection should
-/// be reachable for conversion to succeed. The structure of the loop in LLVM
-/// dialect will be the following:
+/// Converts `spirv.mlir.loop` to LLVM dialect. All blocks within selection
+/// should be reachable for conversion to succeed. The structure of the loop in
+/// LLVM dialect will be the following:
 ///
 ///      +------------------------------------+
-///      | <code before spv.mlir.loop>        |
+///      | <code before spirv.mlir.loop>      |
 ///      | llvm.br ^header                    |
 ///      +------------------------------------+
 ///                           |
@@ -1036,7 +1036,7 @@ class ReturnValuePattern : public SPIRVToLLVMConversion<spirv::ReturnValueOp> {
 ///                        V
 ///      +------------------------------------+
 ///      | ^remaining:                        |
-///      |   <code after spv.mlir.loop>       |
+///      |   <code after spirv.mlir.loop>     |
 ///      +------------------------------------+
 ///
 class LoopPattern : public SPIRVToLLVMConversion<spirv::LoopOp> {
@@ -1052,8 +1052,8 @@ class LoopPattern : public SPIRVToLLVMConversion<spirv::LoopOp> {
 
     Location loc = loopOp.getLoc();
 
-    // Split the current block after `spv.mlir.loop`. The remaining ops will be
-    // used in `endBlock`.
+    // Split the current block after `spirv.mlir.loop`. The remaining ops will
+    // be used in `endBlock`.
     Block *currentBlock = rewriter.getBlock();
     auto position = Block::iterator(loopOp);
     Block *endBlock = rewriter.splitBlock(currentBlock, position);
@@ -1083,7 +1083,7 @@ class LoopPattern : public SPIRVToLLVMConversion<spirv::LoopOp> {
   }
 };
 
-/// Converts `spv.mlir.selection` with `spv.BranchConditional` in its header
+/// Converts `spirv.mlir.selection` with `spirv.BranchConditional` in its header
 /// block. All blocks within selection should be reachable for conversion to
 /// succeed.
 class SelectionPattern : public SPIRVToLLVMConversion<spirv::SelectionOp> {
@@ -1099,7 +1099,7 @@ class SelectionPattern : public SPIRVToLLVMConversion<spirv::SelectionOp> {
     if (op.getSelectionControl() != spirv::SelectionControl::None)
       return failure();
 
-    // `spv.mlir.selection` should have at least two blocks: one selection
+    // `spirv.mlir.selection` should have at least two blocks: one selection
     // header block and one merge block. If no blocks are present, or control
     // flow branches straight to merge block (two blocks are present), the op is
     // redundant and it is erased.
@@ -1110,7 +1110,7 @@ class SelectionPattern : public SPIRVToLLVMConversion<spirv::SelectionOp> {
 
     Location loc = op.getLoc();
 
-    // Split the current block after `spv.mlir.selection`. The remaining ops
+    // Split the current block after `spirv.mlir.selection`. The remaining ops
     // will be used in `continueBlock`.
     auto *currentBlock = rewriter.getInsertionBlock();
     rewriter.setInsertionPointAfter(op);
@@ -1118,9 +1118,9 @@ class SelectionPattern : public SPIRVToLLVMConversion<spirv::SelectionOp> {
     auto *continueBlock = rewriter.splitBlock(currentBlock, position);
 
     // Extract conditional branch information from the header block. By SPIR-V
-    // dialect spec, it should contain `spv.BranchConditional` or `spv.Switch`
-    // op. Note that `spv.Switch op` is not supported at the moment in the
-    // SPIR-V dialect. Remove this block when finished.
+    // dialect spec, it should contain `spirv.BranchConditional` or
+    // `spirv.Switch` op. Note that `spirv.Switch op` is not supported at the
+    // moment in the SPIR-V dialect. Remove this block when finished.
     auto *headerBlock = op.getHeaderBlock();
     assert(headerBlock->getOperations().size() == 1);
     auto condBrOp = dyn_cast<spirv::BranchConditionalOp>(
@@ -1211,7 +1211,7 @@ class TanPattern : public SPIRVToLLVMConversion<spirv::GLTanOp> {
   }
 };
 
-/// Convert `spv.Tanh` to
+/// Convert `spirv.Tanh` to
 ///
 ///   exp(2x) - 1
 ///   -----------

diff --git a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVMPass.cpp b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVMPass.cpp
index 28236ce7f3e2..a2f3c8adf395 100644
--- a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVMPass.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVMPass.cpp
@@ -53,7 +53,7 @@ void ConvertSPIRVToLLVMPass::runOnOperation() {
   target.addIllegalDialect<spirv::SPIRVDialect>();
   target.addLegalDialect<LLVM::LLVMDialect>();
 
-  // Set `ModuleOp` as legal for `spv.module` conversion.
+  // Set `ModuleOp` as legal for `spirv.module` conversion.
   target.addLegalOp<ModuleOp>();
   if (failed(applyPartialConversion(module, target, std::move(patterns))))
     signalPassFailure();

diff --git a/mlir/lib/Conversion/TensorToSPIRV/TensorToSPIRV.cpp b/mlir/lib/Conversion/TensorToSPIRV/TensorToSPIRV.cpp
index 30cfecf92768..776d6c7a6930 100644
--- a/mlir/lib/Conversion/TensorToSPIRV/TensorToSPIRV.cpp
+++ b/mlir/lib/Conversion/TensorToSPIRV/TensorToSPIRV.cpp
@@ -68,7 +68,7 @@ class TensorExtractPattern final
     spirv::VariableOp varOp;
     if (adaptor.getTensor().getDefiningOp<spirv::ConstantOp>()) {
       // We could use the initializer directly; but certain driver compilers
-      // have bugs dealing with that. So for now, use spv.Store for
+      // have bugs dealing with that. So for now, use spirv.Store for
       // initialization.
       varOp = rewriter.create<spirv::VariableOp>(loc, varType,
                                                  spirv::StorageClass::Function,

diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVAttributes.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVAttributes.cpp
index add1ebff925e..ea5bd3158ea0 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVAttributes.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVAttributes.cpp
@@ -614,7 +614,7 @@ static void print(spirv::VerCapExtAttr triple, DialectAsmPrinter &printer) {
 }
 
 static void print(spirv::TargetEnvAttr targetEnv, DialectAsmPrinter &printer) {
-  printer << spirv::TargetEnvAttr::getKindName() << "<#spv.";
+  printer << spirv::TargetEnvAttr::getKindName() << "<#spirv.";
   print(targetEnv.getTripleAttr(), printer);
   spirv::Vendor vendorID = targetEnv.getVendorID();
   spirv::DeviceType deviceType = targetEnv.getDeviceType();

diff --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
index fd197a40a725..b3444d8b210a 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.cpp
@@ -74,7 +74,7 @@ namespace {
 } // namespace
 
 //===----------------------------------------------------------------------===//
-// spv.AccessChainOp
+// spirv.AccessChainOp
 //===----------------------------------------------------------------------===//
 
 namespace {
@@ -113,7 +113,7 @@ void spirv::AccessChainOp::getCanonicalizationPatterns(
 }
 
 //===----------------------------------------------------------------------===//
-// spv.BitcastOp
+// spirv.BitcastOp
 //===----------------------------------------------------------------------===//
 
 void spirv::BitcastOp::getCanonicalizationPatterns(RewritePatternSet &results,
@@ -122,7 +122,7 @@ void spirv::BitcastOp::getCanonicalizationPatterns(RewritePatternSet &results,
 }
 
 //===----------------------------------------------------------------------===//
-// spv.CompositeExtractOp
+// spirv.CompositeExtractOp
 //===----------------------------------------------------------------------===//
 
 OpFoldResult spirv::CompositeExtractOp::fold(ArrayRef<Attribute> operands) {
@@ -150,20 +150,20 @@ OpFoldResult spirv::CompositeExtractOp::fold(ArrayRef<Attribute> operands) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.Constant
+// spirv.Constant
 //===----------------------------------------------------------------------===//
 
 OpFoldResult spirv::ConstantOp::fold(ArrayRef<Attribute> operands) {
-  assert(operands.empty() && "spv.Constant has no operands");
+  assert(operands.empty() && "spirv.Constant has no operands");
   return getValue();
 }
 
 //===----------------------------------------------------------------------===//
-// spv.IAdd
+// spirv.IAdd
 //===----------------------------------------------------------------------===//
 
 OpFoldResult spirv::IAddOp::fold(ArrayRef<Attribute> operands) {
-  assert(operands.size() == 2 && "spv.IAdd expects two operands");
+  assert(operands.size() == 2 && "spirv.IAdd expects two operands");
   // x + 0 = x
   if (matchPattern(getOperand2(), m_Zero()))
     return getOperand1();
@@ -178,11 +178,11 @@ OpFoldResult spirv::IAddOp::fold(ArrayRef<Attribute> operands) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.IMul
+// spirv.IMul
 //===----------------------------------------------------------------------===//
 
 OpFoldResult spirv::IMulOp::fold(ArrayRef<Attribute> operands) {
-  assert(operands.size() == 2 && "spv.IMul expects two operands");
+  assert(operands.size() == 2 && "spirv.IMul expects two operands");
   // x * 0 == 0
   if (matchPattern(getOperand2(), m_Zero()))
     return getOperand2();
@@ -200,7 +200,7 @@ OpFoldResult spirv::IMulOp::fold(ArrayRef<Attribute> operands) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ISub
+// spirv.ISub
 //===----------------------------------------------------------------------===//
 
 OpFoldResult spirv::ISubOp::fold(ArrayRef<Attribute> operands) {
@@ -218,11 +218,11 @@ OpFoldResult spirv::ISubOp::fold(ArrayRef<Attribute> operands) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.LogicalAnd
+// spirv.LogicalAnd
 //===----------------------------------------------------------------------===//
 
 OpFoldResult spirv::LogicalAndOp::fold(ArrayRef<Attribute> operands) {
-  assert(operands.size() == 2 && "spv.LogicalAnd should take two operands");
+  assert(operands.size() == 2 && "spirv.LogicalAnd should take two operands");
 
   if (Optional<bool> rhs = getScalarOrSplatBoolAttr(operands.back())) {
     // x && true = x
@@ -238,7 +238,7 @@ OpFoldResult spirv::LogicalAndOp::fold(ArrayRef<Attribute> operands) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.LogicalNot
+// spirv.LogicalNot
 //===----------------------------------------------------------------------===//
 
 void spirv::LogicalNotOp::getCanonicalizationPatterns(
@@ -250,11 +250,11 @@ void spirv::LogicalNotOp::getCanonicalizationPatterns(
 }
 
 //===----------------------------------------------------------------------===//
-// spv.LogicalOr
+// spirv.LogicalOr
 //===----------------------------------------------------------------------===//
 
 OpFoldResult spirv::LogicalOrOp::fold(ArrayRef<Attribute> operands) {
-  assert(operands.size() == 2 && "spv.LogicalOr should take two operands");
+  assert(operands.size() == 2 && "spirv.LogicalOr should take two operands");
 
   if (auto rhs = getScalarOrSplatBoolAttr(operands.back())) {
     if (rhs.value())
@@ -270,16 +270,16 @@ OpFoldResult spirv::LogicalOrOp::fold(ArrayRef<Attribute> operands) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.mlir.selection
+// spirv.mlir.selection
 //===----------------------------------------------------------------------===//
 
 namespace {
-// Blocks from the given `spv.mlir.selection` operation must satisfy the
+// Blocks from the given `spirv.mlir.selection` operation must satisfy the
 // following layout:
 //
 //       +-----------------------------------------------+
 //       | header block                                  |
-//       | spv.BranchConditionalOp %cond, ^case0, ^case1 |
+//       | spirv.BranchConditionalOp %cond, ^case0, ^case1 |
 //       +-----------------------------------------------+
 //                            /   \
 //                             ...
@@ -287,8 +287,8 @@ namespace {
 //
 //   +------------------------+    +------------------------+
 //   | case #0                |    | case #1                |
-//   | spv.Store %ptr %value0 |    | spv.Store %ptr %value1 |
-//   | spv.Branch ^merge      |    | spv.Branch ^merge      |
+//   | spirv.Store %ptr %value0 |    | spirv.Store %ptr %value1 |
+//   | spirv.Branch ^merge      |    | spirv.Branch ^merge      |
 //   +------------------------+    +------------------------+
 //
 //
@@ -307,7 +307,7 @@ struct ConvertSelectionOpToSelect
                                 PatternRewriter &rewriter) const override {
     auto *op = selectionOp.getOperation();
     auto &body = op->getRegion(0);
-    // Verifier allows an empty region for `spv.mlir.selection`.
+    // Verifier allows an empty region for `spirv.mlir.selection`.
     if (body.empty()) {
       return failure();
     }
@@ -345,7 +345,7 @@ struct ConvertSelectionOpToSelect
     rewriter.create<spirv::StoreOp>(selectOp.getLoc(), ptrValue,
                                     selectOp.getResult(), storeOpAttributes);
 
-    // `spv.mlir.selection` is not needed anymore.
+    // `spirv.mlir.selection` is not needed anymore.
     rewriter.eraseOp(op);
     return success();
   }
@@ -353,8 +353,8 @@ struct ConvertSelectionOpToSelect
 private:
   // Checks that given blocks follow the following rules:
   // 1. Each conditional block consists of two operations, the first operation
-  //    is a `spv.Store` and the last operation is a `spv.Branch`.
-  // 2. Each `spv.Store` uses the same pointer and the same memory attributes.
+  //    is a `spirv.Store` and the last operation is a `spirv.Branch`.
+  // 2. Each `spirv.Store` uses the same pointer and the same memory attributes.
   // 3. A control flow goes into the given merge block from the given
   //    conditional blocks.
   LogicalResult canCanonicalizeSelection(Block *trueBlock, Block *falseBlock,
@@ -402,7 +402,7 @@ LogicalResult ConvertSelectionOpToSelect::canCanonicalizeSelection(
     return failure();
   }
 
-  // Checks that given type is valid for `spv.SelectOp`.
+  // Checks that given type is valid for `spirv.SelectOp`.
   // According to SPIR-V spec:
   // "Before version 1.4, Result Type must be a pointer, scalar, or vector.
   // Starting with version 1.4, Result Type can additionally be a composite type
@@ -412,7 +412,7 @@ LogicalResult ConvertSelectionOpToSelect::canCanonicalizeSelection(
                               .cast<spirv::SPIRVType>()
                               .isScalarOrVector();
 
-  // Check that each `spv.Store` uses the same pointer, memory access
+  // Check that each `spirv.Store` uses the same pointer, memory access
   // attributes and a valid type of the value.
   if ((trueBrStoreOp.getPtr() != falseBrStoreOp.getPtr()) ||
       !isSameAttrList(trueBrStoreOp, falseBrStoreOp) || !isScalarOrVector) {

diff  --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.td b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.td
index c07f9940e6cb..f65032652523 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.td
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVCanonicalization.td
@@ -14,14 +14,14 @@ include "mlir/IR/PatternBase.td"
 include "mlir/Dialect/SPIRV/IR/SPIRVOps.td"
 
 //===----------------------------------------------------------------------===//
-// spv.Bitcast
+// spirv.Bitcast
 //===----------------------------------------------------------------------===//
 
 def ConvertChainedBitcast : Pat<(SPV_BitcastOp (SPV_BitcastOp $operand)),
                                 (SPV_BitcastOp $operand)>;
 
 //===----------------------------------------------------------------------===//
-// spv.LogicalNot
+// spirv.LogicalNot
 //===----------------------------------------------------------------------===//
 
 def ConvertLogicalNotOfIEqual : Pat<
@@ -41,7 +41,7 @@ def ConvertLogicalNotOfLogicalNotEqual : Pat<
     (SPV_LogicalEqualOp $lhs, $rhs)>;
 
 //===----------------------------------------------------------------------===//
-// spv.Select -> spv.GL.*Clamp
+// spirv.Select -> spirv.GL.*Clamp
 //===----------------------------------------------------------------------===//
 
 def ValuesAreEqual : Constraint<CPred<"$0 == $1">>;

diff  --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
index 5703d7e6df81..d911ea7f90ed 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVDialect.cpp
@@ -38,7 +38,8 @@ using namespace mlir::spirv;
 // InlinerInterface
 //===----------------------------------------------------------------------===//
 
-/// Returns true if the given region contains spv.Return or spv.ReturnValue ops.
+/// Returns true if the given region contains spirv.Return or spirv.ReturnValue
+/// ops.
 static inline bool containsReturn(Region &region) {
   return llvm::any_of(region, [](Block &block) {
     Operation *terminator = block.getTerminator();
@@ -61,8 +62,8 @@ struct SPIRVInlinerInterface : public DialectInlinerInterface {
   /// 'dest' that is attached to an operation registered to the current dialect.
   bool isLegalToInline(Region *dest, Region *src, bool wouldBeCloned,
                        BlockAndValueMapping &) const final {
-    // Return true here when inlining into spv.func, spv.mlir.selection, and
-    // spv.mlir.loop operations.
+    // Return true here when inlining into spirv.func, spirv.mlir.selection, and
+    // spirv.mlir.loop operations.
     auto *op = dest->getParentOp();
     return isa<spirv::FuncOp, spirv::SelectionOp, spirv::LoopOp>(op);
   }
@@ -90,7 +91,7 @@ struct SPIRVInlinerInterface : public DialectInlinerInterface {
       OpBuilder(op).create<spirv::BranchOp>(op->getLoc(), newDest);
       op->erase();
     } else if (auto retValOp = dyn_cast<spirv::ReturnValueOp>(op)) {
-      llvm_unreachable("unimplemented spv.ReturnValue in inliner");
+      llvm_unreachable("unimplemented spirv.ReturnValue in inliner");
     }
   }
 
@@ -98,14 +99,14 @@ struct SPIRVInlinerInterface : public DialectInlinerInterface {
   /// as necessary.
   void handleTerminator(Operation *op,
                         ArrayRef<Value> valuesToRepl) const final {
-    // Only spv.ReturnValue needs to be handled here.
+    // Only spirv.ReturnValue needs to be handled here.
     auto retValOp = dyn_cast<spirv::ReturnValueOp>(op);
     if (!retValOp)
       return;
 
     // Replace the values directly with the return operands.
     assert(valuesToRepl.size() == 1 &&
-           "spv.ReturnValue expected to only handle one result");
+           "spirv.ReturnValue expected to only handle one result");
     valuesToRepl.front().replaceAllUsesWith(retValOp.getValue());
   }
 };
@@ -279,7 +280,7 @@ static LogicalResult parseOptionalArrayStride(const SPIRVDialect &dialect,
 //                | vector-type
 //                | spirv-type
 //
-// array-type ::= `!spv.array` `<` integer-literal `x` element-type
+// array-type ::= `!spirv.array` `<` integer-literal `x` element-type
 //                (`,` `stride` `=` integer-literal)? `>`
 static Type parseArrayType(SPIRVDialect const &dialect,
                            DialectAsmParser &parser) {
@@ -317,7 +318,8 @@ static Type parseArrayType(SPIRVDialect const &dialect,
   return ArrayType::get(elementType, count, stride);
 }
 
-// cooperative-matrix-type ::= `!spv.coopmatrix` `<` element-type ',' scope ','
+// cooperative-matrix-type ::= `!spirv.coopmatrix` `<` element-type ',' scope
+// ','
 //                                                   rows ',' columns>`
 static Type parseCooperativeMatrixType(SPIRVDialect const &dialect,
                                        DialectAsmParser &parser) {
@@ -347,7 +349,8 @@ static Type parseCooperativeMatrixType(SPIRVDialect const &dialect,
   return CooperativeMatrixNVType::get(elementTy, scope, dims[0], dims[1]);
 }
 
-// joint-matrix-type ::= `!spv.jointmatrix` `<`rows `x` columns `x` element-type
+// joint-matrix-type ::= `!spirv.jointmatrix` `<`rows `x` columns `x`
+// element-type
 //                                                       `,` layout `,` scope`>`
 static Type parseJointMatrixType(SPIRVDialect const &dialect,
                                  DialectAsmParser &parser) {
@@ -388,7 +391,7 @@ static Type parseJointMatrixType(SPIRVDialect const &dialect,
 //                 | `Workgroup`
 //                 | <and other storage classes...>
 //
-// pointer-type ::= `!spv.ptr<` element-type `,` storage-class `>`
+// pointer-type ::= `!spirv.ptr<` element-type `,` storage-class `>`
 static Type parsePointerType(SPIRVDialect const &dialect,
                              DialectAsmParser &parser) {
   if (parser.parseLess())
@@ -414,7 +417,7 @@ static Type parsePointerType(SPIRVDialect const &dialect,
   return PointerType::get(pointeeType, *storageClass);
 }
 
-// runtime-array-type ::= `!spv.rtarray` `<` element-type
+// runtime-array-type ::= `!spirv.rtarray` `<` element-type
 //                        (`,` `stride` `=` integer-literal)? `>`
 static Type parseRuntimeArrayType(SPIRVDialect const &dialect,
                                   DialectAsmParser &parser) {
@@ -434,7 +437,7 @@ static Type parseRuntimeArrayType(SPIRVDialect const &dialect,
   return RuntimeArrayType::get(elementType, stride);
 }
 
-// matrix-type ::= `!spv.matrix` `<` integer-literal `x` element-type `>`
+// matrix-type ::= `!spirv.matrix` `<` integer-literal `x` element-type `>`
 static Type parseMatrixType(SPIRVDialect const &dialect,
                             DialectAsmParser &parser) {
   if (parser.parseLess())
@@ -559,7 +562,7 @@ struct ParseCommaSeparatedList<ParseType> {
 //
 // format ::= `Unknown` | `Rgba32f` | <and other SPIR-V Image formats...>
 //
-// image-type ::= `!spv.image<` element-type `,` dim `,` depth-info `,`
+// image-type ::= `!spirv.image<` element-type `,` dim `,` depth-info `,`
 //                              arrayed-info `,` sampling-info `,`
 //                              sampler-use-info `,` format `>`
 static Type parseImageType(SPIRVDialect const &dialect,
@@ -579,7 +582,7 @@ static Type parseImageType(SPIRVDialect const &dialect,
   return ImageType::get(*value);
 }
 
-// sampledImage-type :: = `!spv.sampledImage<` image-type `>`
+// sampledImage-type :: = `!spirv.sampledImage<` image-type `>`
 static Type parseSampledImageType(SPIRVDialect const &dialect,
                                   DialectAsmParser &parser) {
   if (parser.parseLess())
@@ -658,7 +661,7 @@ static ParseResult parseStructMemberDecorations(
 
 // struct-member-decoration ::= integer-literal? spirv-decoration*
 // struct-type ::=
-//             `!spv.struct<` (id `,`)?
+//             `!spirv.struct<` (id `,`)?
 //                          `(`
 //                            (spirv-type (`[` struct-member-decoration `]`)?)*
 //                          `)>`

diff  --git a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
index 3582d4569602..958f0fe4f2dc 100644
--- a/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/SPIRVOps.cpp
@@ -694,7 +694,7 @@ static Type
 getElementType(Type type, ArrayRef<int32_t> indices,
                function_ref<InFlightDiagnostic(StringRef)> emitErrorFn) {
   if (indices.empty()) {
-    emitErrorFn("expected at least one index for spv.CompositeExtract");
+    emitErrorFn("expected at least one index for spirv.CompositeExtract");
     return nullptr;
   }
 
@@ -725,7 +725,7 @@ getElementType(Type type, Attribute indices,
     return nullptr;
   }
   if (indicesArrayAttr.empty()) {
-    emitErrorFn("expected at least one index for spv.CompositeExtract");
+    emitErrorFn("expected at least one index for spirv.CompositeExtract");
     return nullptr;
   }
 
@@ -757,7 +757,7 @@ static Type getElementType(Type type, Attribute indices, OpAsmParser &parser,
   return getElementType(type, indices, errorFn);
 }
 
-/// Returns true if the given `block` only contains one `spv.mlir.merge` op.
+/// Returns true if the given `block` only contains one `spirv.mlir.merge` op.
 static inline bool isMergeBlock(Block &block) {
   return !block.empty() && std::next(block.begin()) == block.end() &&
          isa<spirv::MergeOp>(block.front());
@@ -977,13 +977,13 @@ static void buildLogicalUnaryOp(OpBuilder &builder, OperationState &state,
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AccessChainOp
+// spirv.AccessChainOp
 //===----------------------------------------------------------------------===//
 
 static Type getElementPtrType(Type type, ValueRange indices, Location baseLoc) {
   auto ptrType = type.dyn_cast<spirv::PointerType>();
   if (!ptrType) {
-    emitError(baseLoc, "'spv.AccessChain' op expected a pointer "
+    emitError(baseLoc, "'spirv.AccessChain' op expected a pointer "
                        "to composite type, but provided ")
         << type;
     return nullptr;
@@ -996,8 +996,9 @@ static Type getElementPtrType(Type type, ValueRange indices, Location baseLoc) {
   for (auto indexSSA : indices) {
     auto cType = resultType.dyn_cast<spirv::CompositeType>();
     if (!cType) {
-      emitError(baseLoc,
-                "'spv.AccessChain' op cannot extract from non-composite type ")
+      emitError(
+          baseLoc,
+          "'spirv.AccessChain' op cannot extract from non-composite type ")
           << resultType << " with index " << index;
       return nullptr;
     }
@@ -1005,23 +1006,24 @@ static Type getElementPtrType(Type type, ValueRange indices, Location baseLoc) {
     if (resultType.isa<spirv::StructType>()) {
       Operation *op = indexSSA.getDefiningOp();
       if (!op) {
-        emitError(baseLoc, "'spv.AccessChain' op index must be an "
-                           "integer spv.Constant to access "
-                           "element of spv.struct");
+        emitError(baseLoc, "'spirv.AccessChain' op index must be an "
+                           "integer spirv.Constant to access "
+                           "element of spirv.struct");
         return nullptr;
       }
 
       // TODO: this should be relaxed to allow
       // integer literals of other bitwidths.
       if (failed(extractValueFromConstOp(op, index))) {
-        emitError(baseLoc,
-                  "'spv.AccessChain' index must be an integer spv.Constant to "
-                  "access element of spv.struct, but provided ")
+        emitError(
+            baseLoc,
+            "'spirv.AccessChain' index must be an integer spirv.Constant to "
+            "access element of spirv.struct, but provided ")
             << op->getName();
         return nullptr;
       }
       if (index < 0 || static_cast<uint64_t>(index) >= cType.getNumElements()) {
-        emitError(baseLoc, "'spv.AccessChain' op index ")
+        emitError(baseLoc, "'spirv.AccessChain' op index ")
             << index << " out of bounds for " << resultType;
         return nullptr;
       }
@@ -1056,8 +1058,9 @@ ParseResult spirv::AccessChainOp::parse(OpAsmParser &parser,
   // Check that the provided indices list is not empty before parsing their
   // type list.
   if (indicesInfo.empty()) {
-    return mlir::emitError(result.location, "'spv.AccessChain' op expected at "
-                                            "least one index ");
+    return mlir::emitError(result.location,
+                           "'spirv.AccessChain' op expected at "
+                           "least one index ");
   }
 
   if (parser.parseComma() || parser.parseTypeList(indicesTypes))
@@ -1066,9 +1069,9 @@ ParseResult spirv::AccessChainOp::parse(OpAsmParser &parser,
   // Check that the indices types list is not empty and that it has a one-to-one
   // mapping to the provided indices.
   if (indicesTypes.size() != indicesInfo.size()) {
-    return mlir::emitError(result.location,
-                           "'spv.AccessChain' op indices types' count must be "
-                           "equal to indices info count");
+    return mlir::emitError(
+        result.location, "'spirv.AccessChain' op indices types' count must be "
+                         "equal to indices info count");
   }
 
   if (parser.resolveOperands(indicesInfo, indicesTypes, loc, result.operands))
@@ -1120,7 +1123,7 @@ LogicalResult spirv::AccessChainOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.mlir.addressof
+// spirv.mlir.addressof
 //===----------------------------------------------------------------------===//
 
 void spirv::AddressOfOp::build(OpBuilder &builder, OperationState &state,
@@ -1133,7 +1136,7 @@ LogicalResult spirv::AddressOfOp::verify() {
       SymbolTable::lookupNearestSymbolFrom((*this)->getParentOp(),
                                            getVariableAttr()));
   if (!varOp) {
-    return emitOpError("expected spv.GlobalVariable symbol");
+    return emitOpError("expected spirv.GlobalVariable symbol");
   }
   if (getPointer().getType() != varOp.getType()) {
     return emitOpError(
@@ -1216,7 +1219,7 @@ static LogicalResult verifyAtomicCompareExchangeImpl(T atomOp) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicAndOp
+// spirv.AtomicAndOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::AtomicAndOp::verify() {
@@ -1232,7 +1235,7 @@ void spirv::AtomicAndOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicCompareExchangeOp
+// spirv.AtomicCompareExchangeOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::AtomicCompareExchangeOp::verify() {
@@ -1248,7 +1251,7 @@ void spirv::AtomicCompareExchangeOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicCompareExchangeWeakOp
+// spirv.AtomicCompareExchangeWeakOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::AtomicCompareExchangeWeakOp::verify() {
@@ -1264,7 +1267,7 @@ void spirv::AtomicCompareExchangeWeakOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicExchange
+// spirv.AtomicExchange
 //===----------------------------------------------------------------------===//
 
 void spirv::AtomicExchangeOp::print(OpAsmPrinter &printer) {
@@ -1318,7 +1321,7 @@ LogicalResult spirv::AtomicExchangeOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicIAddOp
+// spirv.AtomicIAddOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::AtomicIAddOp::verify() {
@@ -1334,7 +1337,7 @@ void spirv::AtomicIAddOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.EXT.AtomicFAddOp
+// spirv.EXT.AtomicFAddOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::EXTAtomicFAddOp::verify() {
@@ -1350,7 +1353,7 @@ void spirv::EXTAtomicFAddOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicIDecrementOp
+// spirv.AtomicIDecrementOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::AtomicIDecrementOp::verify() {
@@ -1366,7 +1369,7 @@ void spirv::AtomicIDecrementOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicIIncrementOp
+// spirv.AtomicIIncrementOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::AtomicIIncrementOp::verify() {
@@ -1382,7 +1385,7 @@ void spirv::AtomicIIncrementOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicISubOp
+// spirv.AtomicISubOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::AtomicISubOp::verify() {
@@ -1398,7 +1401,7 @@ void spirv::AtomicISubOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicOrOp
+// spirv.AtomicOrOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::AtomicOrOp::verify() {
@@ -1414,7 +1417,7 @@ void spirv::AtomicOrOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicSMaxOp
+// spirv.AtomicSMaxOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::AtomicSMaxOp::verify() {
@@ -1430,7 +1433,7 @@ void spirv::AtomicSMaxOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicSMinOp
+// spirv.AtomicSMinOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::AtomicSMinOp::verify() {
@@ -1446,7 +1449,7 @@ void spirv::AtomicSMinOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicUMaxOp
+// spirv.AtomicUMaxOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::AtomicUMaxOp::verify() {
@@ -1462,7 +1465,7 @@ void spirv::AtomicUMaxOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicUMinOp
+// spirv.AtomicUMinOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::AtomicUMinOp::verify() {
@@ -1478,7 +1481,7 @@ void spirv::AtomicUMinOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicXorOp
+// spirv.AtomicXorOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::AtomicXorOp::verify() {
@@ -1494,7 +1497,7 @@ void spirv::AtomicXorOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.BitcastOp
+// spirv.BitcastOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::BitcastOp::verify() {
@@ -1526,7 +1529,7 @@ LogicalResult spirv::BitcastOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.PtrCastToGenericOp
+// spirv.PtrCastToGenericOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::PtrCastToGenericOp::verify() {
@@ -1554,7 +1557,7 @@ LogicalResult spirv::PtrCastToGenericOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GenericCastToPtrOp
+// spirv.GenericCastToPtrOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::GenericCastToPtrOp::verify() {
@@ -1582,7 +1585,7 @@ LogicalResult spirv::GenericCastToPtrOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GenericCastToPtrExplicitOp
+// spirv.GenericCastToPtrExplicitOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::GenericCastToPtrExplicitOp::verify() {
@@ -1610,7 +1613,7 @@ LogicalResult spirv::GenericCastToPtrExplicitOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.BranchOp
+// spirv.BranchOp
 //===----------------------------------------------------------------------===//
 
 SuccessorOperands spirv::BranchOp::getSuccessorOperands(unsigned index) {
@@ -1619,7 +1622,7 @@ SuccessorOperands spirv::BranchOp::getSuccessorOperands(unsigned index) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.BranchConditionalOp
+// spirv.BranchConditionalOp
 //===----------------------------------------------------------------------===//
 
 SuccessorOperands
@@ -1713,7 +1716,7 @@ LogicalResult spirv::BranchConditionalOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.CompositeConstruct
+// spirv.CompositeConstruct
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::CompositeConstructOp::verify() {
@@ -1789,7 +1792,7 @@ LogicalResult spirv::CompositeConstructOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.CompositeExtractOp
+// spirv.CompositeExtractOp
 //===----------------------------------------------------------------------===//
 
 void spirv::CompositeExtractOp::build(OpBuilder &builder, OperationState &state,
@@ -1849,7 +1852,7 @@ LogicalResult spirv::CompositeExtractOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.CompositeInsert
+// spirv.CompositeInsert
 //===----------------------------------------------------------------------===//
 
 void spirv::CompositeInsertOp::build(OpBuilder &builder, OperationState &state,
@@ -1904,7 +1907,7 @@ void spirv::CompositeInsertOp::print(OpAsmPrinter &printer) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.Constant
+// spirv.Constant
 //===----------------------------------------------------------------------===//
 
 ParseResult spirv::ConstantOp::parse(OpAsmParser &parser,
@@ -1948,7 +1951,7 @@ static LogicalResult verifyConstantType(spirv::ConstantOp op, Attribute value,
     if (!arrayType)
       return op.emitOpError("result or element type (")
              << opType << ") does not match value type (" << valueType
-             << "), must be the same or spv.array";
+             << "), must be the same or spirv.array";
 
     int numElements = arrayType.getNumElements();
     auto opElemType = arrayType.getElementType();
@@ -1976,7 +1979,8 @@ static LogicalResult verifyConstantType(spirv::ConstantOp op, Attribute value,
   if (auto arrayAttr = value.dyn_cast<ArrayAttr>()) {
     auto arrayType = opType.dyn_cast<spirv::ArrayType>();
     if (!arrayType)
-      return op.emitOpError("must have spv.array result type for array value");
+      return op.emitOpError(
+          "must have spirv.array result type for array value");
     Type elemType = arrayType.getElementType();
     for (Attribute element : arrayAttr.getValue()) {
       // Verify array elements recursively.
@@ -2123,7 +2127,7 @@ void mlir::spirv::AddressOfOp::getAsmResultNames(
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ControlBarrierOp
+// spirv.ControlBarrierOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::ControlBarrierOp::verify() {
@@ -2131,7 +2135,7 @@ LogicalResult spirv::ControlBarrierOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ConvertFToSOp
+// spirv.ConvertFToSOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::ConvertFToSOp::verify() {
@@ -2140,7 +2144,7 @@ LogicalResult spirv::ConvertFToSOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ConvertFToUOp
+// spirv.ConvertFToUOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::ConvertFToUOp::verify() {
@@ -2149,7 +2153,7 @@ LogicalResult spirv::ConvertFToUOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ConvertSToFOp
+// spirv.ConvertSToFOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::ConvertSToFOp::verify() {
@@ -2158,7 +2162,7 @@ LogicalResult spirv::ConvertSToFOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ConvertUToFOp
+// spirv.ConvertUToFOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::ConvertUToFOp::verify() {
@@ -2167,7 +2171,7 @@ LogicalResult spirv::ConvertUToFOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.EntryPoint
+// spirv.EntryPoint
 //===----------------------------------------------------------------------===//
 
 void spirv::EntryPointOp::build(OpBuilder &builder, OperationState &state,
@@ -2227,7 +2231,7 @@ LogicalResult spirv::EntryPointOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ExecutionMode
+// spirv.ExecutionMode
 //===----------------------------------------------------------------------===//
 
 void spirv::ExecutionModeOp::build(OpBuilder &builder, OperationState &state,
@@ -2277,7 +2281,7 @@ void spirv::ExecutionModeOp::print(OpAsmPrinter &printer) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FConvertOp
+// spirv.FConvertOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::FConvertOp::verify() {
@@ -2285,7 +2289,7 @@ LogicalResult spirv::FConvertOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.SConvertOp
+// spirv.SConvertOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::SConvertOp::verify() {
@@ -2293,7 +2297,7 @@ LogicalResult spirv::SConvertOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.UConvertOp
+// spirv.UConvertOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::UConvertOp::verify() {
@@ -2301,7 +2305,7 @@ LogicalResult spirv::UConvertOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.func
+// spirv.func
 //===----------------------------------------------------------------------===//
 
 ParseResult spirv::FuncOp::parse(OpAsmParser &parser, OperationState &result) {
@@ -2436,7 +2440,7 @@ ArrayRef<Type> spirv::FuncOp::getCallableResults() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FunctionCall
+// spirv.FunctionCall
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::FunctionCallOp::verify() {
@@ -2497,7 +2501,7 @@ Operation::operand_range spirv::FunctionCallOp::getArgOperands() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GLFClampOp
+// spirv.GLFClampOp
 //===----------------------------------------------------------------------===//
 
 ParseResult spirv::GLFClampOp::parse(OpAsmParser &parser,
@@ -2507,7 +2511,7 @@ ParseResult spirv::GLFClampOp::parse(OpAsmParser &parser,
 void spirv::GLFClampOp::print(OpAsmPrinter &p) { printOneResultOp(*this, p); }
 
 //===----------------------------------------------------------------------===//
-// spv.GLUClampOp
+// spirv.GLUClampOp
 //===----------------------------------------------------------------------===//
 
 ParseResult spirv::GLUClampOp::parse(OpAsmParser &parser,
@@ -2517,7 +2521,7 @@ ParseResult spirv::GLUClampOp::parse(OpAsmParser &parser,
 void spirv::GLUClampOp::print(OpAsmPrinter &p) { printOneResultOp(*this, p); }
 
 //===----------------------------------------------------------------------===//
-// spv.GLSClampOp
+// spirv.GLSClampOp
 //===----------------------------------------------------------------------===//
 
 ParseResult spirv::GLSClampOp::parse(OpAsmParser &parser,
@@ -2527,7 +2531,7 @@ ParseResult spirv::GLSClampOp::parse(OpAsmParser &parser,
 void spirv::GLSClampOp::print(OpAsmPrinter &p) { printOneResultOp(*this, p); }
 
 //===----------------------------------------------------------------------===//
-// spv.GLFmaOp
+// spirv.GLFmaOp
 //===----------------------------------------------------------------------===//
 
 ParseResult spirv::GLFmaOp::parse(OpAsmParser &parser, OperationState &result) {
@@ -2536,7 +2540,7 @@ ParseResult spirv::GLFmaOp::parse(OpAsmParser &parser, OperationState &result) {
 void spirv::GLFmaOp::print(OpAsmPrinter &p) { printOneResultOp(*this, p); }
 
 //===----------------------------------------------------------------------===//
-// spv.GlobalVariable
+// spirv.GlobalVariable
 //===----------------------------------------------------------------------===//
 
 void spirv::GlobalVariableOp::build(OpBuilder &builder, OperationState &state,
@@ -2589,7 +2593,7 @@ ParseResult spirv::GlobalVariableOp::parse(OpAsmParser &parser,
     return failure();
   }
   if (!type.isa<spirv::PointerType>()) {
-    return parser.emitError(loc, "expected spv.ptr type");
+    return parser.emitError(loc, "expected spirv.ptr type");
   }
   result.addAttribute(kTypeAttrName, TypeAttr::get(type));
 
@@ -2622,7 +2626,7 @@ LogicalResult spirv::GlobalVariableOp::verify() {
   // SPIR-V spec: "Storage Class is the Storage Class of the memory holding the
   // object. It cannot be Generic. It must be the same as the Storage Class
   // operand of the Result Type."
-  // Also, Function storage class is reserved by spv.Variable.
+  // Also, Function storage class is reserved by spirv.Variable.
   auto storageClass = this->storageClass();
   if (storageClass == spirv::StorageClass::Generic ||
       storageClass == spirv::StorageClass::Function) {
@@ -2640,7 +2644,7 @@ LogicalResult spirv::GlobalVariableOp::verify() {
     if (!initOp ||
         !isa<spirv::GlobalVariableOp, spirv::SpecConstantOp>(initOp)) {
       return emitOpError("initializer must be result of a "
-                         "spv.SpecConstant or spv.GlobalVariable op");
+                         "spirv.SpecConstant or spirv.GlobalVariable op");
     }
   }
 
@@ -2648,7 +2652,7 @@ LogicalResult spirv::GlobalVariableOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GroupBroadcast
+// spirv.GroupBroadcast
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::GroupBroadcastOp::verify() {
@@ -2666,7 +2670,7 @@ LogicalResult spirv::GroupBroadcastOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GroupNonUniformBallotOp
+// spirv.GroupNonUniformBallotOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::GroupNonUniformBallotOp::verify() {
@@ -2678,7 +2682,7 @@ LogicalResult spirv::GroupNonUniformBallotOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GroupNonUniformBroadcast
+// spirv.GroupNonUniformBroadcast
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::GroupNonUniformBroadcastOp::verify() {
@@ -2703,7 +2707,7 @@ LogicalResult spirv::GroupNonUniformBroadcastOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GroupNonUniformShuffle*
+// spirv.GroupNonUniformShuffle*
 //===----------------------------------------------------------------------===//
 
 template <typename OpTy>
@@ -2732,7 +2736,7 @@ LogicalResult spirv::GroupNonUniformShuffleXorOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.INTEL.SubgroupBlockRead
+// spirv.INTEL.SubgroupBlockRead
 //===----------------------------------------------------------------------===//
 
 ParseResult spirv::INTELSubgroupBlockReadOp::parse(OpAsmParser &parser,
@@ -2770,7 +2774,7 @@ LogicalResult spirv::INTELSubgroupBlockReadOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.INTEL.SubgroupBlockWrite
+// spirv.INTEL.SubgroupBlockWrite
 //===----------------------------------------------------------------------===//
 
 ParseResult spirv::INTELSubgroupBlockWriteOp::parse(OpAsmParser &parser,
@@ -2810,7 +2814,7 @@ LogicalResult spirv::INTELSubgroupBlockWriteOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GroupNonUniformElectOp
+// spirv.GroupNonUniformElectOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::GroupNonUniformElectOp::verify() {
@@ -2822,7 +2826,7 @@ LogicalResult spirv::GroupNonUniformElectOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GroupNonUniformFAddOp
+// spirv.GroupNonUniformFAddOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::GroupNonUniformFAddOp::verify() {
@@ -2838,7 +2842,7 @@ void spirv::GroupNonUniformFAddOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GroupNonUniformFMaxOp
+// spirv.GroupNonUniformFMaxOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::GroupNonUniformFMaxOp::verify() {
@@ -2854,7 +2858,7 @@ void spirv::GroupNonUniformFMaxOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GroupNonUniformFMinOp
+// spirv.GroupNonUniformFMinOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::GroupNonUniformFMinOp::verify() {
@@ -2870,7 +2874,7 @@ void spirv::GroupNonUniformFMinOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GroupNonUniformFMulOp
+// spirv.GroupNonUniformFMulOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::GroupNonUniformFMulOp::verify() {
@@ -2886,7 +2890,7 @@ void spirv::GroupNonUniformFMulOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GroupNonUniformIAddOp
+// spirv.GroupNonUniformIAddOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::GroupNonUniformIAddOp::verify() {
@@ -2902,7 +2906,7 @@ void spirv::GroupNonUniformIAddOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GroupNonUniformIMulOp
+// spirv.GroupNonUniformIMulOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::GroupNonUniformIMulOp::verify() {
@@ -2918,7 +2922,7 @@ void spirv::GroupNonUniformIMulOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GroupNonUniformSMaxOp
+// spirv.GroupNonUniformSMaxOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::GroupNonUniformSMaxOp::verify() {
@@ -2934,7 +2938,7 @@ void spirv::GroupNonUniformSMaxOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GroupNonUniformSMinOp
+// spirv.GroupNonUniformSMinOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::GroupNonUniformSMinOp::verify() {
@@ -2950,7 +2954,7 @@ void spirv::GroupNonUniformSMinOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GroupNonUniformUMaxOp
+// spirv.GroupNonUniformUMaxOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::GroupNonUniformUMaxOp::verify() {
@@ -2966,7 +2970,7 @@ void spirv::GroupNonUniformUMaxOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GroupNonUniformUMinOp
+// spirv.GroupNonUniformUMinOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::GroupNonUniformUMinOp::verify() {
@@ -2982,7 +2986,7 @@ void spirv::GroupNonUniformUMinOp::print(OpAsmPrinter &p) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.IAddCarryOp
+// spirv.IAddCarryOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::IAddCarryOp::verify() {
@@ -3013,7 +3017,7 @@ ParseResult spirv::IAddCarryOp::parse(OpAsmParser &parser,
 
   auto structType = resultType.dyn_cast<spirv::StructType>();
   if (!structType || structType.getNumElements() != 2)
-    return parser.emitError(loc, "expected spv.struct type with two members");
+    return parser.emitError(loc, "expected spirv.struct type with two members");
 
   SmallVector<Type, 2> operandTypes(2, structType.getElementType(0));
   if (parser.resolveOperands(operands, operandTypes, loc, result.operands))
@@ -3031,7 +3035,7 @@ void spirv::IAddCarryOp::print(OpAsmPrinter &printer) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ISubBorrowOp
+// spirv.ISubBorrowOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::ISubBorrowOp::verify() {
@@ -3062,7 +3066,7 @@ ParseResult spirv::ISubBorrowOp::parse(OpAsmParser &parser,
 
   auto structType = resultType.dyn_cast<spirv::StructType>();
   if (!structType || structType.getNumElements() != 2)
-    return parser.emitError(loc, "expected spv.struct type with two members");
+    return parser.emitError(loc, "expected spirv.struct type with two members");
 
   SmallVector<Type, 2> operandTypes(2, structType.getElementType(0));
   if (parser.resolveOperands(operands, operandTypes, loc, result.operands))
@@ -3080,7 +3084,7 @@ void spirv::ISubBorrowOp::print(OpAsmPrinter &printer) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.LoadOp
+// spirv.LoadOp
 //===----------------------------------------------------------------------===//
 
 void spirv::LoadOp::build(OpBuilder &builder, OperationState &state,
@@ -3135,7 +3139,7 @@ LogicalResult spirv::LoadOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.mlir.loop
+// spirv.mlir.loop
 //===----------------------------------------------------------------------===//
 
 void spirv::LoopOp::build(OpBuilder &builder, OperationState &state) {
@@ -3160,7 +3164,7 @@ void spirv::LoopOp::print(OpAsmPrinter &printer) {
                       /*printBlockTerminators=*/true);
 }
 
-/// Returns true if the given `srcBlock` contains only one `spv.Branch` to the
+/// Returns true if the given `srcBlock` contains only one `spirv.Branch` to the
 /// given `dstBlock`.
 static inline bool hasOneBranchOpTo(Block &srcBlock, Block &dstBlock) {
   // Check that there is only one op in the `srcBlock`.
@@ -3208,8 +3212,8 @@ LogicalResult spirv::LoopOp::verifyRegions() {
   // The last block is the merge block.
   Block &merge = region.back();
   if (!isMergeBlock(merge))
-    return emitOpError(
-        "last block must be the merge block with only one 'spv.mlir.merge' op");
+    return emitOpError("last block must be the merge block with only one "
+                       "'spirv.mlir.merge' op");
 
   if (std::next(region.begin()) == region.end())
     return emitOpError(
@@ -3225,7 +3229,7 @@ LogicalResult spirv::LoopOp::verifyRegions() {
 
   if (!hasOneBranchOpTo(entry, header))
     return emitOpError(
-        "entry block must only have one 'spv.Branch' op to the second block");
+        "entry block must only have one 'spirv.Branch' op to the second block");
 
   if (std::next(region.begin(), 3) == region.end())
     return emitOpError(
@@ -3286,12 +3290,12 @@ void spirv::LoopOp::addEntryAndMergeBlock() {
   getBody().push_back(mergeBlock);
   OpBuilder builder = OpBuilder::atBlockEnd(mergeBlock);
 
-  // Add a spv.mlir.merge op into the merge block.
+  // Add a spirv.mlir.merge op into the merge block.
   builder.create<spirv::MergeOp>(getLoc());
 }
 
 //===----------------------------------------------------------------------===//
-// spv.MemoryBarrierOp
+// spirv.MemoryBarrierOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::MemoryBarrierOp::verify() {
@@ -3299,25 +3303,25 @@ LogicalResult spirv::MemoryBarrierOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.mlir.merge
+// spirv.mlir.merge
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::MergeOp::verify() {
   auto *parentOp = (*this)->getParentOp();
   if (!parentOp || !isa<spirv::SelectionOp, spirv::LoopOp>(parentOp))
     return emitOpError(
-        "expected parent op to be 'spv.mlir.selection' or 'spv.mlir.loop'");
+        "expected parent op to be 'spirv.mlir.selection' or 'spirv.mlir.loop'");
 
   // TODO: This check should be done in `verifyRegions` of parent op.
   Block &parentLastBlock = (*this)->getParentRegion()->back();
   if (getOperation() != parentLastBlock.getTerminator())
     return emitOpError("can only be used in the last block of "
-                       "'spv.mlir.selection' or 'spv.mlir.loop'");
+                       "'spirv.mlir.selection' or 'spirv.mlir.loop'");
   return success();
 }
 
 //===----------------------------------------------------------------------===//
-// spv.module
+// spirv.module
 //===----------------------------------------------------------------------===//
 
 void spirv::ModuleOp::build(OpBuilder &builder, OperationState &state,
@@ -3419,7 +3423,7 @@ LogicalResult spirv::ModuleOp::verifyRegions() {
 
   for (auto &op : *getBody()) {
     if (op.getDialect() != dialect)
-      return op.emitError("'spv.module' can only contain spv.* ops");
+      return op.emitError("'spirv.module' can only contain spirv.* ops");
 
     // For EntryPoint op, check that the function and execution model is not
     // duplicated in EntryPointOps. Also verify that the interface specified
@@ -3428,7 +3432,7 @@ LogicalResult spirv::ModuleOp::verifyRegions() {
       auto funcOp = table.lookup<spirv::FuncOp>(entryPointOp.getFn());
       if (!funcOp) {
         return entryPointOp.emitError("function '")
-               << entryPointOp.getFn() << "' not found in 'spv.module'";
+               << entryPointOp.getFn() << "' not found in 'spirv.module'";
       }
       if (auto interface = entryPointOp.getInterface()) {
         for (Attribute varRef : interface) {
@@ -3442,7 +3446,7 @@ LogicalResult spirv::ModuleOp::verifyRegions() {
           auto variableOp =
               table.lookup<spirv::GlobalVariableOp>(varSymRef.getValue());
           if (!variableOp) {
-            return entryPointOp.emitError("expected spv.GlobalVariable "
+            return entryPointOp.emitError("expected spirv.GlobalVariable "
                                           "symbol reference instead of'")
                    << varSymRef << "'";
           }
@@ -3458,14 +3462,14 @@ LogicalResult spirv::ModuleOp::verifyRegions() {
       entryPoints[key] = entryPointOp;
     } else if (auto funcOp = dyn_cast<spirv::FuncOp>(op)) {
       if (funcOp.isExternal())
-        return op.emitError("'spv.module' cannot contain external functions");
+        return op.emitError("'spirv.module' cannot contain external functions");
 
-      // TODO: move this check to spv.func.
+      // TODO: move this check to spirv.func.
       for (auto &block : funcOp)
         for (auto &op : block) {
           if (op.getDialect() != dialect)
             return op.emitError(
-                "functions in 'spv.module' can only contain spv.* ops");
+                "functions in 'spirv.module' can only contain spirv.* ops");
         }
     }
   }
@@ -3474,7 +3478,7 @@ LogicalResult spirv::ModuleOp::verifyRegions() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.mlir.referenceof
+// spirv.mlir.referenceof
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::ReferenceOfOp::verify() {
@@ -3493,7 +3497,7 @@ LogicalResult spirv::ReferenceOfOp::verify() {
 
   if (!specConstOp && !specConstCompositeOp)
     return emitOpError(
-        "expected spv.SpecConstant or spv.SpecConstantComposite symbol");
+        "expected spirv.SpecConstant or spirv.SpecConstantComposite symbol");
 
   if (getReference().getType() != constType)
     return emitOpError("result type mismatch with the referenced "
@@ -3503,25 +3507,25 @@ LogicalResult spirv::ReferenceOfOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.Return
+// spirv.Return
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::ReturnOp::verify() {
-  // Verification is performed in spv.func op.
+  // Verification is performed in spirv.func op.
   return success();
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ReturnValue
+// spirv.ReturnValue
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::ReturnValueOp::verify() {
-  // Verification is performed in spv.func op.
+  // Verification is performed in spirv.func op.
   return success();
 }
 
 //===----------------------------------------------------------------------===//
-// spv.Select
+// spirv.Select
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::SelectOp::verify() {
@@ -3540,7 +3544,7 @@ LogicalResult spirv::SelectOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.mlir.selection
+// spirv.mlir.selection
 //===----------------------------------------------------------------------===//
 
 ParseResult spirv::SelectionOp::parse(OpAsmParser &parser,
@@ -3592,8 +3596,8 @@ LogicalResult spirv::SelectionOp::verifyRegions() {
 
   // The last block is the merge block.
   if (!isMergeBlock(region.back()))
-    return emitOpError(
-        "last block must be the merge block with only one 'spv.mlir.merge' op");
+    return emitOpError("last block must be the merge block with only one "
+                       "'spirv.mlir.merge' op");
 
   if (std::next(region.begin()) == region.end())
     return emitOpError("must have a selection header block");
@@ -3619,7 +3623,7 @@ void spirv::SelectionOp::addMergeBlock() {
   getBody().push_back(mergeBlock);
   OpBuilder builder = OpBuilder::atBlockEnd(mergeBlock);
 
-  // Add a spv.mlir.merge op into the merge block.
+  // Add a spirv.mlir.merge op into the merge block.
   builder.create<spirv::MergeOp>(getLoc());
 }
 
@@ -3655,7 +3659,7 @@ spirv::SelectionOp spirv::SelectionOp::createIfThen(
 }
 
 //===----------------------------------------------------------------------===//
-// spv.SpecConstant
+// spirv.SpecConstant
 //===----------------------------------------------------------------------===//
 
 ParseResult spirv::SpecConstantOp::parse(OpAsmParser &parser,
@@ -3709,7 +3713,7 @@ LogicalResult spirv::SpecConstantOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.StoreOp
+// spirv.StoreOp
 //===----------------------------------------------------------------------===//
 
 ParseResult spirv::StoreOp::parse(OpAsmParser &parser, OperationState &result) {
@@ -3754,7 +3758,7 @@ LogicalResult spirv::StoreOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.Unreachable
+// spirv.Unreachable
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::UnreachableOp::verify() {
@@ -3773,7 +3777,7 @@ LogicalResult spirv::UnreachableOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.Variable
+// spirv.Variable
 //===----------------------------------------------------------------------===//
 
 ParseResult spirv::VariableOp::parse(OpAsmParser &parser,
@@ -3801,7 +3805,7 @@ ParseResult spirv::VariableOp::parse(OpAsmParser &parser,
 
   auto ptrType = type.dyn_cast<spirv::PointerType>();
   if (!ptrType)
-    return parser.emitError(loc, "expected spv.ptr type");
+    return parser.emitError(loc, "expected spirv.ptr type");
   result.addTypes(ptrType);
 
   // Resolve the initializer operand
@@ -3836,7 +3840,7 @@ LogicalResult spirv::VariableOp::verify() {
   if (getStorageClass() != spirv::StorageClass::Function) {
     return emitOpError(
         "can only be used to model function-level variables. Use "
-        "spv.GlobalVariable for module-level variables.");
+        "spirv.GlobalVariable for module-level variables.");
   }
 
   auto pointerType = getPointer().getType().cast<spirv::PointerType>();
@@ -3852,7 +3856,7 @@ LogicalResult spirv::VariableOp::verify() {
                         spirv::ReferenceOfOp, // for spec constant
                         spirv::AddressOfOp>(initOp))
       return emitOpError("initializer must be the result of a "
-                         "constant or spv.GlobalVariable op");
+                         "constant or spirv.GlobalVariable op");
   }
 
   // TODO: generate these strings using ODS.
@@ -3867,14 +3871,14 @@ LogicalResult spirv::VariableOp::verify() {
   for (const auto &attr : {descriptorSetName, bindingName, builtInName}) {
     if (op->getAttr(attr))
       return emitOpError("cannot have '")
-             << attr << "' attribute (only allowed in spv.GlobalVariable)";
+             << attr << "' attribute (only allowed in spirv.GlobalVariable)";
   }
 
   return success();
 }
 
 //===----------------------------------------------------------------------===//
-// spv.VectorShuffle
+// spirv.VectorShuffle
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::VectorShuffleOp::verify() {
@@ -3903,7 +3907,7 @@ LogicalResult spirv::VectorShuffleOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.NV.CooperativeMatrixLoad
+// spirv.NV.CooperativeMatrixLoad
 //===----------------------------------------------------------------------===//
 
 ParseResult spirv::NVCooperativeMatrixLoadOp::parse(OpAsmParser &parser,
@@ -3962,7 +3966,7 @@ LogicalResult spirv::NVCooperativeMatrixLoadOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.NV.CooperativeMatrixStore
+// spirv.NV.CooperativeMatrixStore
 //===----------------------------------------------------------------------===//
 
 ParseResult spirv::NVCooperativeMatrixStoreOp::parse(OpAsmParser &parser,
@@ -4002,7 +4006,7 @@ LogicalResult spirv::NVCooperativeMatrixStoreOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.NV.CooperativeMatrixMulAdd
+// spirv.NV.CooperativeMatrixMulAdd
 //===----------------------------------------------------------------------===//
 
 static LogicalResult
@@ -4051,7 +4055,7 @@ verifyPointerAndJointMatrixType(Operation *op, Type pointer, Type jointMatrix) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.INTEL.JointMatrixLoad
+// spirv.INTEL.JointMatrixLoad
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::INTELJointMatrixLoadOp::verify() {
@@ -4060,7 +4064,7 @@ LogicalResult spirv::INTELJointMatrixLoadOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.INTEL.JointMatrixStore
+// spirv.INTEL.JointMatrixStore
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::INTELJointMatrixStoreOp::verify() {
@@ -4069,7 +4073,7 @@ LogicalResult spirv::INTELJointMatrixStoreOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.INTEL.JointMatrixMad
+// spirv.INTEL.JointMatrixMad
 //===----------------------------------------------------------------------===//
 
 static LogicalResult verifyJointMatrixMad(spirv::INTELJointMatrixMadOp op) {
@@ -4098,7 +4102,7 @@ LogicalResult spirv::INTELJointMatrixMadOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.MatrixTimesScalar
+// spirv.MatrixTimesScalar
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::MatrixTimesScalarOp::verify() {
@@ -4135,7 +4139,7 @@ LogicalResult spirv::MatrixTimesScalarOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.CopyMemory
+// spirv.CopyMemory
 //===----------------------------------------------------------------------===//
 
 void spirv::CopyMemoryOp::print(OpAsmPrinter &printer) {
@@ -4229,7 +4233,7 @@ LogicalResult spirv::CopyMemoryOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.Transpose
+// spirv.Transpose
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::TransposeOp::verify() {
@@ -4254,7 +4258,7 @@ LogicalResult spirv::TransposeOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.MatrixTimesMatrix
+// spirv.MatrixTimesMatrix
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::MatrixTimesMatrixOp::verify() {
@@ -4290,7 +4294,7 @@ LogicalResult spirv::MatrixTimesMatrixOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.SpecConstantComposite
+// spirv.SpecConstantComposite
 //===----------------------------------------------------------------------===//
 
 ParseResult spirv::SpecConstantCompositeOp::parse(OpAsmParser &parser,
@@ -4380,7 +4384,7 @@ LogicalResult spirv::SpecConstantCompositeOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.SpecConstantOperation
+// spirv.SpecConstantOperation
 //===----------------------------------------------------------------------===//
 
 ParseResult spirv::SpecConstantOperationOp::parse(OpAsmParser &parser,
@@ -4436,7 +4440,7 @@ LogicalResult spirv::SpecConstantOperationOp::verifyRegions() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GL.FrexpStruct
+// spirv.GL.FrexpStruct
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::GLFrexpStructOp::verify() {
@@ -4483,7 +4487,7 @@ LogicalResult spirv::GLFrexpStructOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GL.Ldexp
+// spirv.GL.Ldexp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::GLLdexpOp::verify() {
@@ -4506,7 +4510,7 @@ LogicalResult spirv::GLLdexpOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ImageDrefGather
+// spirv.ImageDrefGather
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::ImageDrefGatherOp::verify() {
@@ -4544,7 +4548,7 @@ LogicalResult spirv::ImageDrefGatherOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ShiftLeftLogicalOp
+// spirv.ShiftLeftLogicalOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::ShiftLeftLogicalOp::verify() {
@@ -4552,7 +4556,7 @@ LogicalResult spirv::ShiftLeftLogicalOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ShiftRightArithmeticOp
+// spirv.ShiftRightArithmeticOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::ShiftRightArithmeticOp::verify() {
@@ -4560,7 +4564,7 @@ LogicalResult spirv::ShiftRightArithmeticOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ShiftRightLogicalOp
+// spirv.ShiftRightLogicalOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::ShiftRightLogicalOp::verify() {
@@ -4568,7 +4572,7 @@ LogicalResult spirv::ShiftRightLogicalOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ImageQuerySize
+// spirv.ImageQuerySize
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::ImageQuerySizeOp::verify() {
@@ -4682,7 +4686,7 @@ static auto concatElemAndIndices(Op op) {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.InBoundsPtrAccessChainOp
+// spirv.InBoundsPtrAccessChainOp
 //===----------------------------------------------------------------------===//
 
 void spirv::InBoundsPtrAccessChainOp::build(OpBuilder &builder,
@@ -4709,7 +4713,7 @@ LogicalResult spirv::InBoundsPtrAccessChainOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.PtrAccessChainOp
+// spirv.PtrAccessChainOp
 //===----------------------------------------------------------------------===//
 
 void spirv::PtrAccessChainOp::build(OpBuilder &builder, OperationState &state,
@@ -4735,7 +4739,7 @@ LogicalResult spirv::PtrAccessChainOp::verify() {
 }
 
 //===----------------------------------------------------------------------===//
-// spv.VectorTimesScalarOp
+// spirv.VectorTimesScalarOp
 //===----------------------------------------------------------------------===//
 
 LogicalResult spirv::VectorTimesScalarOp::verify() {

diff  --git a/mlir/lib/Dialect/SPIRV/IR/TargetAndABI.cpp b/mlir/lib/Dialect/SPIRV/IR/TargetAndABI.cpp
index b588afa10647..a36527dfa7c8 100644
--- a/mlir/lib/Dialect/SPIRV/IR/TargetAndABI.cpp
+++ b/mlir/lib/Dialect/SPIRV/IR/TargetAndABI.cpp
@@ -95,7 +95,7 @@ MLIRContext *spirv::TargetEnv::getContext() const {
 //===----------------------------------------------------------------------===//
 
 StringRef spirv::getInterfaceVarABIAttrName() {
-  return "spv.interface_var_abi";
+  return "spirv.interface_var_abi";
 }
 
 spirv::InterfaceVarABIAttr
@@ -116,7 +116,7 @@ bool spirv::needsInterfaceVarABIAttrs(spirv::TargetEnvAttr targetAttr) {
   return false;
 }
 
-StringRef spirv::getEntryPointABIAttrName() { return "spv.entry_point_abi"; }
+StringRef spirv::getEntryPointABIAttrName() { return "spirv.entry_point_abi"; }
 
 spirv::EntryPointABIAttr
 spirv::getEntryPointABIAttr(ArrayRef<int32_t> localSize, MLIRContext *context) {
@@ -164,7 +164,7 @@ spirv::getDefaultResourceLimits(MLIRContext *context) {
       /*cooperative_matrix_properties_nv=*/ArrayAttr());
 }
 
-StringRef spirv::getTargetEnvAttrName() { return "spv.target_env"; }
+StringRef spirv::getTargetEnvAttrName() { return "spirv.target_env"; }
 
 spirv::TargetEnvAttr spirv::getDefaultTargetEnv(MLIRContext *context) {
   auto triple = spirv::VerCapExtAttr::get(spirv::Version::V_1_0,

diff  --git a/mlir/lib/Dialect/SPIRV/Linking/ModuleCombiner/ModuleCombiner.cpp b/mlir/lib/Dialect/SPIRV/Linking/ModuleCombiner/ModuleCombiner.cpp
index 888e1ca8456c..3083bb8d8ce6 100644
--- a/mlir/lib/Dialect/SPIRV/Linking/ModuleCombiner/ModuleCombiner.cpp
+++ b/mlir/lib/Dialect/SPIRV/Linking/ModuleCombiner/ModuleCombiner.cpp
@@ -130,9 +130,9 @@ OwningOpRef<spirv::ModuleOp> combine(ArrayRef<spirv::ModuleOp> inputModules,
 
     // In the combined module, rename all symbols that conflict with symbols
     // from the current input module. This renaming applies to all ops except
-    // for spv.funcs. This way, if the conflicting op in the input module is
-    // non-spv.func, we rename that symbol instead and maintain the spv.func in
-    // the combined module name as it is.
+    // for spirv.funcs. This way, if the conflicting op in the input module is
+    // non-spirv.func, we rename that symbol instead and maintain the spirv.func
+    // in the combined module name as it is.
     for (auto &op : *combinedModule.getBody()) {
       auto symbolOp = dyn_cast<SymbolOpInterface>(op);
       if (!symbolOp)
@@ -169,7 +169,7 @@ OwningOpRef<spirv::ModuleOp> combine(ArrayRef<spirv::ModuleOp> inputModules,
     }
 
     // In the current input module, rename all symbols that conflict with
-    // symbols from the combined module. This includes renaming spv.funcs.
+    // symbols from the combined module. This includes renaming spirv.funcs.
     for (auto &op : *moduleClone->getBody()) {
       auto symbolOp = dyn_cast<SymbolOpInterface>(op);
       if (!symbolOp)

diff  --git a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
index 8ed2d0095a54..107d96194ff8 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/LowerABIAttributesPass.cpp
@@ -44,8 +44,8 @@ createGlobalVarForEntryPointArgument(OpBuilder &builder, spirv::FuncOp funcOp,
       funcOp.getName().str() + "_arg_" + std::to_string(argIndex);
 
   // Get the type of variable. If this is a scalar/vector type and has an ABI
-  // info create a variable of type !spv.ptr<!spv.struct<elementType>>. If not
-  // it must already be a !spv.ptr<!spv.struct<...>>.
+  // info create a variable of type !spirv.ptr<!spirv.struct<elementType>>. If
+  // not it must already be a !spirv.ptr<!spirv.struct<...>>.
   auto varType = funcOp.getFunctionType().getInput(argIndex);
   if (varType.cast<spirv::SPIRVType>().isScalarOrVector()) {
     auto storageClass = abiInfo.getStorageClass();
@@ -73,7 +73,7 @@ createGlobalVarForEntryPointArgument(OpBuilder &builder, spirv::FuncOp funcOp,
 }
 
 /// Gets the global variables that need to be specified as interface variable
-/// with an spv.EntryPointOp. Traverses the body of a entry function to do so.
+/// with an spirv.EntryPointOp. Traverses the body of a entry function to do so.
 static LogicalResult
 getInterfaceVariables(spirv::FuncOp funcOp,
                       SmallVectorImpl<Attribute> &interfaceVars) {
@@ -124,7 +124,7 @@ static LogicalResult lowerEntryPointABIAttr(spirv::FuncOp funcOp,
   auto spirvModule = funcOp->getParentOfType<spirv::ModuleOp>();
   builder.setInsertionPointToEnd(spirvModule.getBody());
 
-  // Adds the spv.EntryPointOp after collecting all the interface variables
+  // Adds the spirv.EntryPointOp after collecting all the interface variables
   // needed.
   SmallVector<Attribute, 1> interfaceVars;
   if (failed(getInterfaceVariables(funcOp, interfaceVars))) {
@@ -136,12 +136,12 @@ static LogicalResult lowerEntryPointABIAttr(spirv::FuncOp funcOp,
       spirv::getExecutionModel(targetEnv);
   if (failed(executionModel))
     return funcOp.emitRemark("lower entry point failure: could not select "
-                             "execution model based on 'spv.target_env'");
+                             "execution model based on 'spirv.target_env'");
 
   builder.create<spirv::EntryPointOp>(funcOp.getLoc(), executionModel.value(),
                                       funcOp, interfaceVars);
 
-  // Specifies the spv.ExecutionModeOp.
+  // Specifies the spirv.ExecutionModeOp.
   auto localSizeAttr = entryPointAttr.getLocalSize();
   if (localSizeAttr) {
     auto values = localSizeAttr.getValues<int32_t>();

diff  --git a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
index c52d484ea57a..3e2d0e9db575 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/SPIRVConversion.cpp
@@ -510,7 +510,7 @@ FuncOpConversion::matchAndRewrite(func::FuncOp funcOp, OpAdaptor adaptor,
       return failure();
   }
 
-  // Create the converted spv.func op.
+  // Create the converted spirv.func op.
   auto newFuncOp = rewriter.create<spirv::FuncOp>(
       funcOp.getLoc(), funcOp.getName(),
       rewriter.getFunctionType(signatureConverter.getConvertedTypes(),
@@ -545,7 +545,7 @@ void mlir::populateBuiltinFuncToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
 static spirv::GlobalVariableOp getBuiltinVariable(Block &body,
                                                   spirv::BuiltIn builtin) {
   // Look through all global variables in the given `body` block and check if
-  // there is a spv.GlobalVariable that has the same `builtin` attribute.
+  // there is a spirv.GlobalVariable that has the same `builtin` attribute.
   for (auto varOp : body.getOps<spirv::GlobalVariableOp>()) {
     if (auto builtinAttr = varOp->getAttrOfType<StringAttr>(
             spirv::SPIRVDialect::getAttributeName(

diff  --git a/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp b/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp
index 397c330b954f..8565e9793076 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/UnifyAliasedResourcePass.cpp
@@ -61,7 +61,8 @@ static AliasedResourceMap collectAliasedResources(spirv::ModuleOp moduleOp) {
 }
 
 /// Returns the element type if the given `type` is a runtime array resource:
-/// `!spv.ptr<!spv.struct<!spv.rtarray<...>>>`. Returns null type otherwise.
+/// `!spirv.ptr<!spirv.struct<!spirv.rtarray<...>>>`. Returns null type
+/// otherwise.
 static Type getRuntimeArrayElementType(Type type) {
   auto ptrType = type.dyn_cast<spirv::PointerType>();
   if (!ptrType)
@@ -154,9 +155,9 @@ static bool areSameBitwidthScalarType(Type a, Type b) {
 namespace {
 /// A class for analyzing aliased resources.
 ///
-/// Resources are expected to be spv.GlobalVarible that has a descriptor set and
-/// binding number. Such resources are of the type `!spv.ptr<!spv.struct<...>>`
-/// per Vulkan requirements.
+/// Resources are expected to be spirv.GlobalVarible that has a descriptor set
+/// and binding number. Such resources are of the type
+/// `!spirv.ptr<!spirv.struct<...>>` per Vulkan requirements.
 ///
 /// Right now, we only support the case that there is a single runtime array
 /// inside the struct.
@@ -410,7 +411,7 @@ struct ConvertAccessChain : public ConvertAliasResource<spirv::AccessChainOp> {
     }
 
     return rewriter.notifyMatchFailure(
-        acOp, "unsupported src/dst types for spv.AccessChain");
+        acOp, "unsupported src/dst types for spirv.AccessChain");
   }
 };
 
@@ -459,7 +460,7 @@ struct ConvertLoad : public ConvertAliasResource<spirv::LoadOp> {
 
       auto acOp = adaptor.getPtr().getDefiningOp<spirv::AccessChainOp>();
       if (!acOp)
-        return rewriter.notifyMatchFailure(loadOp, "ptr not spv.AccessChain");
+        return rewriter.notifyMatchFailure(loadOp, "ptr not spirv.AccessChain");
 
       auto i32Type = rewriter.getI32Type();
       Value oneValue = spirv::ConstantOp::getOne(i32Type, loc, rewriter);
@@ -477,7 +478,7 @@ struct ConvertLoad : public ConvertAliasResource<spirv::LoadOp> {
       }
 
       // Create a vector of the components and then cast back to the larger
-      // bitwidth element type. For spv.bitcast, the lower-numbered components
+      // bitwidth element type. For spirv.bitcast, the lower-numbered components
       // of the vector map to lower-ordered bits of the larger bitwidth element
       // type.
       Type vectorType = srcElemType;
@@ -493,7 +494,7 @@ struct ConvertLoad : public ConvertAliasResource<spirv::LoadOp> {
     }
 
     return rewriter.notifyMatchFailure(
-        loadOp, "unsupported src/dst types for spv.Load");
+        loadOp, "unsupported src/dst types for spirv.Load");
   }
 };
 

diff  --git a/mlir/lib/Dialect/SPIRV/Transforms/UpdateVCEPass.cpp b/mlir/lib/Dialect/SPIRV/Transforms/UpdateVCEPass.cpp
index d7cf3f5fcff5..33091b5f5575 100644
--- a/mlir/lib/Dialect/SPIRV/Transforms/UpdateVCEPass.cpp
+++ b/mlir/lib/Dialect/SPIRV/Transforms/UpdateVCEPass.cpp
@@ -102,7 +102,7 @@ void UpdateVCEPass::runOnOperation() {
 
   spirv::TargetEnvAttr targetAttr = spirv::lookupTargetEnv(module);
   if (!targetAttr) {
-    module.emitError("missing 'spv.target_env' attribute");
+    module.emitError("missing 'spirv.target_env' attribute");
     return signalPassFailure();
   }
 

diff  --git a/mlir/lib/Target/SPIRV/Deserialization/DeserializeOps.cpp b/mlir/lib/Target/SPIRV/Deserialization/DeserializeOps.cpp
index 2b81e91ec769..847a3aa5fc43 100644
--- a/mlir/lib/Target/SPIRV/Deserialization/DeserializeOps.cpp
+++ b/mlir/lib/Target/SPIRV/Deserialization/DeserializeOps.cpp
@@ -40,7 +40,7 @@ static inline spirv::Opcode extractOpcode(uint32_t word) {
 
 Value spirv::Deserializer::getValue(uint32_t id) {
   if (auto constInfo = getConstant(id)) {
-    // Materialize a `spv.Constant` op at every use site.
+    // Materialize a `spirv.Constant` op at every use site.
     return opBuilder.create<spirv::ConstantOp>(unknownLoc, constInfo->second,
                                                constInfo->first);
   }
@@ -149,7 +149,7 @@ LogicalResult spirv::Deserializer::processInstruction(
   case spirv::Opcode::OpSourceContinued:
   case spirv::Opcode::OpSourceExtension:
     // TODO: This is debug information embedded in the binary which should be
-    // translated into the spv.module.
+    // translated into the spirv.module.
     return success();
   case spirv::Opcode::OpTypeVoid:
   case spirv::Opcode::OpTypeBool:

diff  --git a/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp b/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp
index 604c71c9808d..8f075c5c491f 100644
--- a/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp
+++ b/mlir/lib/Target/SPIRV/Deserialization/Deserializer.cpp
@@ -279,8 +279,8 @@ LogicalResult spirv::Deserializer::processDecoration(ArrayRef<uint32_t> words) {
       return emitError(unknownLoc, "OpDecoration with ")
              << decorationName << "needs a single target <id>";
     }
-    // Block decoration does not affect spv.struct type, but is still stored for
-    // verification.
+    // Block decoration does not affect spirv.struct type, but is still stored
+    // for verification.
     // TODO: Update StructType to contain this information since
     // it is needed for many validation rules.
     decorations[words[0]].set(symbol, opBuilder.getUnitAttr());
@@ -483,7 +483,8 @@ spirv::Deserializer::processFunctionEnd(ArrayRef<uint32_t> operands) {
   }
 
   // Wire up block arguments from OpPhi instructions.
-  // Put all structured control flow in spv.mlir.selection/spv.mlir.loop ops.
+  // Put all structured control flow in spirv.mlir.selection/spirv.mlir.loop
+  // ops.
   if (failed(wireUpBlockArgument()) || failed(structurizeControlFlow())) {
     return failure();
   }
@@ -563,7 +564,7 @@ spirv::Deserializer::processGlobalVariable(ArrayRef<uint32_t> operands) {
   auto ptrType = type.dyn_cast<spirv::PointerType>();
   if (!ptrType) {
     return emitError(unknownLoc,
-                     "expected a result type <id> to be a spv.ptr, found : ")
+                     "expected a result type <id> to be a spirv.ptr, found : ")
            << type;
   }
   wordIndex++;
@@ -1467,7 +1468,7 @@ Block *spirv::Deserializer::getOrCreateBlock(uint32_t id) {
   }
 
   // We don't know where this block will be placed finally (in a
-  // spv.mlir.selection or spv.mlir.loop or function). Create it into the
+  // spirv.mlir.selection or spirv.mlir.loop or function). Create it into the
   // function for now and sort out the proper place later.
   auto *block = curFunction->addBlock();
   LLVM_DEBUG(logger.startLine() << "[block] created block for id = " << id
@@ -1639,7 +1640,7 @@ LogicalResult spirv::Deserializer::processPhi(ArrayRef<uint32_t> operands) {
 
 namespace {
 /// A class for putting all blocks in a structured selection/loop in a
-/// spv.mlir.selection/spv.mlir.loop op.
+/// spirv.mlir.selection/spirv.mlir.loop op.
 class ControlFlowStructurizer {
 public:
 #ifndef NDEBUG
@@ -1660,18 +1661,19 @@ class ControlFlowStructurizer {
 
   /// Structurizes the loop at the given `headerBlock`.
   ///
-  /// This method will create an spv.mlir.loop op in the `mergeBlock` and move
-  /// all blocks in the structured loop into the spv.mlir.loop's region. All
+  /// This method will create an spirv.mlir.loop op in the `mergeBlock` and move
+  /// all blocks in the structured loop into the spirv.mlir.loop's region. All
   /// branches to the `headerBlock` will be redirected to the `mergeBlock`. This
   /// method will also update `mergeInfo` by remapping all blocks inside to the
   /// newly cloned ones inside structured control flow op's regions.
   LogicalResult structurize();
 
 private:
-  /// Creates a new spv.mlir.selection op at the beginning of the `mergeBlock`.
+  /// Creates a new spirv.mlir.selection op at the beginning of the
+  /// `mergeBlock`.
   spirv::SelectionOp createSelectionOp(uint32_t selectionControl);
 
-  /// Creates a new spv.mlir.loop op at the beginning of the `mergeBlock`.
+  /// Creates a new spirv.mlir.loop op at the beginning of the `mergeBlock`.
   spirv::LoopOp createLoopOp(uint32_t loopControl);
 
   /// Collects all blocks reachable from `headerBlock` except `mergeBlock`.
@@ -1684,7 +1686,7 @@ class ControlFlowStructurizer {
 
   Block *headerBlock;
   Block *mergeBlock;
-  Block *continueBlock; // nullptr for spv.mlir.selection
+  Block *continueBlock; // nullptr for spirv.mlir.selection
 
   SetVector<Block *> constructBlocks;
 
@@ -1838,8 +1840,8 @@ LogicalResult ControlFlowStructurizer::structurize() {
     for (BlockArgument blockArg : headerBlock->getArguments())
       mergeBlock->addArgument(blockArg.getType(), blockArg.getLoc());
 
-    // If the loop header block has block arguments, make sure the spv.Branch op
-    // matches.
+    // If the loop header block has block arguments, make sure the spirv.Branch
+    // op matches.
     SmallVector<Value, 4> blockArgs;
     if (!headerBlock->args_empty())
       blockArgs = {mergeBlock->args_begin(), mergeBlock->args_end()};
@@ -1917,7 +1919,7 @@ LogicalResult ControlFlowStructurizer::structurize() {
     // matching the function signature and used by the cloned blocks.
     if (isFnEntryBlock(block)) {
       LLVM_DEBUG(logger.startLine() << "[cf] changing entry block " << block
-                                    << " to only contain a spv.Branch op\n");
+                                    << " to only contain a spirv.Branch op\n");
       // Still keep the function entry block for the potential block arguments,
       // but replace all ops inside with a branch to the merge block.
       block->clear();

diff  --git a/mlir/lib/Target/SPIRV/Deserialization/Deserializer.h b/mlir/lib/Target/SPIRV/Deserialization/Deserializer.h
index 784ec2b3e624..236f3a660298 100644
--- a/mlir/lib/Target/SPIRV/Deserialization/Deserializer.h
+++ b/mlir/lib/Target/SPIRV/Deserialization/Deserializer.h
@@ -33,10 +33,10 @@ namespace spirv {
 ///
 /// This struct is used to track original structured control flow info from
 /// SPIR-V blob. This info will be used to create
-/// spv.mlir.selection/spv.mlir.loop later.
+/// spirv.mlir.selection/spirv.mlir.loop later.
 struct BlockMergeInfo {
   Block *mergeBlock;
-  Block *continueBlock; // nullptr for spv.mlir.selection
+  Block *continueBlock; // nullptr for spirv.mlir.selection
   Location loc;
   uint32_t control; // Selection/loop control
 
@@ -223,8 +223,8 @@ class Deserializer {
 
   /// Processes the OpVariable instructions at current `offset` into `binary`.
   /// It is expected that this method is used for variables that are to be
-  /// defined at module scope and will be deserialized into a spv.GlobalVariable
-  /// instruction.
+  /// defined at module scope and will be deserialized into a
+  /// spirv.GlobalVariable instruction.
   LogicalResult processGlobalVariable(ArrayRef<uint32_t> operands);
 
   /// Gets the global variable associated with a result <id> of OpVariable.
@@ -332,9 +332,9 @@ class Deserializer {
 
   // In SPIR-V, structured control flow is explicitly declared using merge
   // instructions (OpSelectionMerge and OpLoopMerge). In the SPIR-V dialect,
-  // we use spv.mlir.selection and spv.mlir.loop to group structured control
+  // we use spirv.mlir.selection and spirv.mlir.loop to group structured control
   // flow. The deserializer need to turn structured control flow marked with
-  // merge instructions into using spv.mlir.selection/spv.mlir.loop ops.
+  // merge instructions into using spirv.mlir.selection/spirv.mlir.loop ops.
   //
   // Because structured control flow can nest and the basic block order have
   // flexibility, we cannot isolate a structured selection/loop without
@@ -346,13 +346,14 @@ class Deserializer {
   //    target blocks.
   // 2. For each selection/loop header block, recursively get all basic blocks
   //    reachable (except the merge block) and put them in a newly created
-  //    spv.mlir.selection/spv.mlir.loop's region. Structured control flow
+  //    spirv.mlir.selection/spirv.mlir.loop's region. Structured control flow
   //    guarantees that we enter and exit in structured ways and the construct
   //    is nestable.
-  // 3. Put the new spv.mlir.selection/spv.mlir.loop op at the beginning of the
+  // 3. Put the new spirv.mlir.selection/spirv.mlir.loop op at the beginning of
+  // the
   //    old merge block and redirect all branches to the old header block to the
-  //    old merge block (which contains the spv.mlir.selection/spv.mlir.loop op
-  //    now).
+  //    old merge block (which contains the spirv.mlir.selection/spirv.mlir.loop
+  //    op now).
 
   /// For OpPhi instructions, we use block arguments to represent them. OpPhi
   /// encodes a list of (value, predecessor) pairs. At the time of handling the
@@ -398,8 +399,8 @@ class Deserializer {
   LogicalResult wireUpBlockArgument();
 
   /// Extracts blocks belonging to a structured selection/loop into a
-  /// spv.mlir.selection/spv.mlir.loop op. This method iterates until all blocks
-  /// declared as selection/loop headers are handled.
+  /// spirv.mlir.selection/spirv.mlir.loop op. This method iterates until all
+  /// blocks declared as selection/loop headers are handled.
   LogicalResult structurizeControlFlow();
 
   //===--------------------------------------------------------------------===//
@@ -409,8 +410,8 @@ class Deserializer {
   /// Get the Value associated with a result <id>.
   ///
   /// This method materializes normal constants and inserts "casting" ops
-  /// (`spv.mlir.addressof` and `spv.mlir.referenceof`) to turn an symbol into a
-  /// SSA value for handling uses of module scope constants/variables in
+  /// (`spirv.mlir.addressof` and `spirv.mlir.referenceof`) to turn an symbol
+  /// into a SSA value for handling uses of module scope constants/variables in
   /// functions.
   Value getValue(uint32_t id);
 
@@ -441,8 +442,8 @@ class Deserializer {
                                             StringRef opName, bool hasResult,
                                             unsigned numOperands);
 
-  /// Processes a OpUndef instruction. Adds a spv.Undef operation at the current
-  /// insertion point.
+  /// Processes a OpUndef instruction. Adds a spirv.Undef operation at the
+  /// current insertion point.
   LogicalResult processUndef(ArrayRef<uint32_t> operands);
 
   /// Method to dispatch to the specialized deserialization function for an
@@ -587,8 +588,8 @@ class Deserializer {
   // List of instructions that are processed in a deferred fashion (after an
   // initial processing of the entire binary). Some operations like
   // OpEntryPoint, and OpExecutionMode use forward references to function
-  // <id>s. In SPIR-V dialect the corresponding operations (spv.EntryPoint and
-  // spv.ExecutionMode) need these references resolved. So these instructions
+  // <id>s. In SPIR-V dialect the corresponding operations (spirv.EntryPoint and
+  // spirv.ExecutionMode) need these references resolved. So these instructions
   // are deserialized and stored for processing once the entire binary is
   // processed.
   SmallVector<std::pair<spirv::Opcode, ArrayRef<uint32_t>>, 4>

diff  --git a/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp b/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp
index 6197664e1e9b..d863ab475176 100644
--- a/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp
+++ b/mlir/lib/Target/SPIRV/Serialization/SerializeOps.cpp
@@ -407,7 +407,7 @@ LogicalResult Serializer::processSelectionOp(spirv::SelectionOp selectionOp) {
     return failure();
 
   // There is nothing to do for the merge block in the selection, which just
-  // contains a spv.mlir.merge op, itself. But we need to have an OpLabel
+  // contains a spirv.mlir.merge op, itself. But we need to have an OpLabel
   // instruction to start a new SPIR-V block for ops following this SelectionOp.
   // The block should use the <id> for the merge block.
   encodeInstructionInto(functionBody, spirv::Opcode::OpLabel, {mergeID});
@@ -472,8 +472,8 @@ LogicalResult Serializer::processLoopOp(spirv::LoopOp loopOp) {
     return failure();
 
   // There is nothing to do for the merge block in the loop, which just contains
-  // a spv.mlir.merge op, itself. But we need to have an OpLabel instruction to
-  // start a new SPIR-V block for ops following this LoopOp. The block should
+  // a spirv.mlir.merge op, itself. But we need to have an OpLabel instruction
+  // to start a new SPIR-V block for ops following this LoopOp. The block should
   // use the <id> for the merge block.
   encodeInstructionInto(functionBody, spirv::Opcode::OpLabel, {mergeID});
   LLVM_DEBUG(llvm::dbgs() << "done merge ");
@@ -544,7 +544,7 @@ Serializer::processOp<spirv::EntryPointOp>(spirv::EntryPointOp op) {
   if (!funcID) {
     return op.emitError("missing <id> for function ")
            << op.getFn()
-           << "; function needs to be defined before spv.EntryPoint is "
+           << "; function needs to be defined before spirv.EntryPoint is "
               "serialized";
   }
   operands.push_back(funcID);
@@ -556,9 +556,10 @@ Serializer::processOp<spirv::EntryPointOp>(spirv::EntryPointOp op) {
     for (auto var : interface.getValue()) {
       auto id = getVariableID(var.cast<FlatSymbolRefAttr>().getValue());
       if (!id) {
-        return op.emitError("referencing undefined global variable."
-                            "spv.EntryPoint is at the end of spv.module. All "
-                            "referenced variables should already be defined");
+        return op.emitError(
+            "referencing undefined global variable."
+            "spirv.EntryPoint is at the end of spirv.module. All "
+            "referenced variables should already be defined");
       }
       operands.push_back(id);
     }
@@ -612,7 +613,7 @@ Serializer::processOp<spirv::FunctionCallOp>(spirv::FunctionCallOp op) {
 
   for (auto value : op.getArguments()) {
     auto valueID = getValueID(value);
-    assert(valueID && "cannot find a value for spv.FunctionCall");
+    assert(valueID && "cannot find a value for spirv.FunctionCall");
     operands.push_back(valueID);
   }
 

diff  --git a/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp b/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp
index e482b257ddf4..f39bfebfd768 100644
--- a/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp
+++ b/mlir/lib/Target/SPIRV/Serialization/Serializer.cpp
@@ -45,7 +45,7 @@ static Block *getStructuredControlFlowOpMergeBlock(Operation *op) {
 /// corresponding to the block arguments.
 static Block *getPhiIncomingBlock(Block *block) {
   // If the predecessor block in question is the entry block for a
-  // spv.mlir.loop, we jump to this spv.mlir.loop from its enclosing block.
+  // spirv.mlir.loop, we jump to this spirv.mlir.loop from its enclosing block.
   if (block->isEntryBlock()) {
     if (auto loopOp = dyn_cast<spirv::LoopOp>(block->getParentOp())) {
       // Then the incoming parent block for OpPhi should be the merge block of
@@ -1035,8 +1035,8 @@ LogicalResult Serializer::emitPhiForBlockArguments(Block *block) {
     // structure. It does not directly map to the incoming parent block for the
     // OpPhi instructions at SPIR-V binary level. This is because structured
     // control flow ops are serialized to multiple SPIR-V blocks. If there is a
-    // spv.mlir.selection/spv.mlir.loop op in the MLIR predecessor block, the
-    // branch op jumping to the OpPhi's block then resides in the previous
+    // spirv.mlir.selection/spirv.mlir.loop op in the MLIR predecessor block,
+    // the branch op jumping to the OpPhi's block then resides in the previous
     // structured control flow op's merge block.
     Block *spirvPredecessor = getPhiIncomingBlock(mlirPredecessor);
     LLVM_DEBUG(llvm::dbgs() << "  spirv predecessor ");

diff  --git a/mlir/lib/Target/SPIRV/Serialization/Serializer.h b/mlir/lib/Target/SPIRV/Serialization/Serializer.h
index a338111fd2f2..ab9b901fa269 100644
--- a/mlir/lib/Target/SPIRV/Serialization/Serializer.h
+++ b/mlir/lib/Target/SPIRV/Serialization/Serializer.h
@@ -116,7 +116,7 @@ class Serializer {
   LogicalResult
   processSpecConstantOperationOp(spirv::SpecConstantOperationOp op);
 
-  /// SPIR-V dialect supports OpUndef using spv.UndefOp that produces a SSA
+  /// SPIR-V dialect supports OpUndef using spirv.UndefOp that produces a SSA
   /// value to use with other operations. The SPIR-V spec recommends that
   /// OpUndef be generated at module level. The serialization generates an
   /// OpUndef for each type needed at module level.
@@ -424,10 +424,10 @@ class Serializer {
   ///   ...
   /// ^parent1:
   ///   ...
-  ///   spv.Branch ^phi(%val0: i32)
+  ///   spirv.Branch ^phi(%val0: i32)
   /// ^parent2:
   ///   ...
-  ///   spv.Branch ^phi(%val1: i32)
+  ///   spirv.Branch ^phi(%val1: i32)
   /// ```
   ///
   /// When we are serializing the `^phi` block, we need to emit at the beginning

diff  --git a/mlir/lib/Target/SPIRV/TranslateRegistration.cpp b/mlir/lib/Target/SPIRV/TranslateRegistration.cpp
index 9522c792f4c8..664b796e42e2 100644
--- a/mlir/lib/Target/SPIRV/TranslateRegistration.cpp
+++ b/mlir/lib/Target/SPIRV/TranslateRegistration.cpp
@@ -90,10 +90,10 @@ static LogicalResult serializeModule(ModuleOp module, raw_ostream &output) {
   module.walk([&](spirv::ModuleOp op) { spirvModules.push_back(op); });
 
   if (spirvModules.empty())
-    return module.emitError("found no 'spv.module' op");
+    return module.emitError("found no 'spirv.module' op");
 
   if (spirvModules.size() != 1)
-    return module.emitError("found more than one 'spv.module' op");
+    return module.emitError("found more than one 'spirv.module' op");
 
   if (failed(spirv::serialize(spirvModules[0], binary)))
     return failure();
@@ -128,10 +128,10 @@ static LogicalResult roundTripModule(ModuleOp srcModule, bool emitDebugInfo,
   auto spirvModules = srcModule.getOps<spirv::ModuleOp>();
 
   if (spirvModules.begin() == spirvModules.end())
-    return srcModule.emitError("found no 'spv.module' op");
+    return srcModule.emitError("found no 'spirv.module' op");
 
   if (std::next(spirvModules.begin()) != spirvModules.end())
-    return srcModule.emitError("found more than one 'spv.module' op");
+    return srcModule.emitError("found more than one 'spirv.module' op");
 
   spirv::SerializationOptions options;
   options.emitDebugInfo = emitDebugInfo;

diff  --git a/mlir/test/Conversion/ArithmeticToSPIRV/arithmetic-to-spirv.mlir b/mlir/test/Conversion/ArithmeticToSPIRV/arithmetic-to-spirv.mlir
index 8b4de3da0208..16a7967c3a34 100644
--- a/mlir/test/Conversion/ArithmeticToSPIRV/arithmetic-to-spirv.mlir
+++ b/mlir/test/Conversion/ArithmeticToSPIRV/arithmetic-to-spirv.mlir
@@ -5,24 +5,24 @@
 //===----------------------------------------------------------------------===//
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64, Shader], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64, Shader], []>, #spirv.resource_limits<>>
 } {
 
 // Check integer operation conversions.
 // CHECK-LABEL: @int32_scalar
 func.func @int32_scalar(%lhs: i32, %rhs: i32) {
-  // CHECK: spv.IAdd %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.IAdd %{{.*}}, %{{.*}}: i32
   %0 = arith.addi %lhs, %rhs: i32
-  // CHECK: spv.ISub %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.ISub %{{.*}}, %{{.*}}: i32
   %1 = arith.subi %lhs, %rhs: i32
-  // CHECK: spv.IMul %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.IMul %{{.*}}, %{{.*}}: i32
   %2 = arith.muli %lhs, %rhs: i32
-  // CHECK: spv.SDiv %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.SDiv %{{.*}}, %{{.*}}: i32
   %3 = arith.divsi %lhs, %rhs: i32
-  // CHECK: spv.UDiv %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.UDiv %{{.*}}, %{{.*}}: i32
   %4 = arith.divui %lhs, %rhs: i32
-  // CHECK: spv.UMod %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.UMod %{{.*}}, %{{.*}}: i32
   %5 = arith.remui %lhs, %rhs: i32
   return
 }
@@ -30,29 +30,29 @@ func.func @int32_scalar(%lhs: i32, %rhs: i32) {
 // CHECK-LABEL: @int32_scalar_srem
 // CHECK-SAME: (%[[LHS:.+]]: i32, %[[RHS:.+]]: i32)
 func.func @int32_scalar_srem(%lhs: i32, %rhs: i32) {
-  // CHECK: %[[LABS:.+]] = spv.GL.SAbs %[[LHS]] : i32
-  // CHECK: %[[RABS:.+]] = spv.GL.SAbs %[[RHS]] : i32
-  // CHECK:  %[[ABS:.+]] = spv.UMod %[[LABS]], %[[RABS]] : i32
-  // CHECK:  %[[POS:.+]] = spv.IEqual %[[LHS]], %[[LABS]] : i32
-  // CHECK:  %[[NEG:.+]] = spv.SNegate %[[ABS]] : i32
-  // CHECK:      %{{.+}} = spv.Select %[[POS]], %[[ABS]], %[[NEG]] : i1, i32
+  // CHECK: %[[LABS:.+]] = spirv.GL.SAbs %[[LHS]] : i32
+  // CHECK: %[[RABS:.+]] = spirv.GL.SAbs %[[RHS]] : i32
+  // CHECK:  %[[ABS:.+]] = spirv.UMod %[[LABS]], %[[RABS]] : i32
+  // CHECK:  %[[POS:.+]] = spirv.IEqual %[[LHS]], %[[LABS]] : i32
+  // CHECK:  %[[NEG:.+]] = spirv.SNegate %[[ABS]] : i32
+  // CHECK:      %{{.+}} = spirv.Select %[[POS]], %[[ABS]], %[[NEG]] : i1, i32
   %0 = arith.remsi %lhs, %rhs: i32
   return
 }
 
 // CHECK-LABEL: @index_scalar
 func.func @index_scalar(%lhs: index, %rhs: index) {
-  // CHECK: spv.IAdd %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.IAdd %{{.*}}, %{{.*}}: i32
   %0 = arith.addi %lhs, %rhs: index
-  // CHECK: spv.ISub %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.ISub %{{.*}}, %{{.*}}: i32
   %1 = arith.subi %lhs, %rhs: index
-  // CHECK: spv.IMul %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.IMul %{{.*}}, %{{.*}}: i32
   %2 = arith.muli %lhs, %rhs: index
-  // CHECK: spv.SDiv %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.SDiv %{{.*}}, %{{.*}}: i32
   %3 = arith.divsi %lhs, %rhs: index
-  // CHECK: spv.UDiv %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.UDiv %{{.*}}, %{{.*}}: i32
   %4 = arith.divui %lhs, %rhs: index
-  // CHECK: spv.UMod %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.UMod %{{.*}}, %{{.*}}: i32
   %5 = arith.remui %lhs, %rhs: index
   return
 }
@@ -62,12 +62,12 @@ func.func @index_scalar(%lhs: index, %rhs: index) {
 func.func @index_scalar_srem(%lhs: index, %rhs: index) {
   // CHECK: %[[LHS:.+]] = builtin.unrealized_conversion_cast %[[A]] : index to i32
   // CHECK: %[[RHS:.+]] = builtin.unrealized_conversion_cast %[[B]] : index to i32
-  // CHECK: %[[LABS:.+]] = spv.GL.SAbs %[[LHS]] : i32
-  // CHECK: %[[RABS:.+]] = spv.GL.SAbs %[[RHS]] : i32
-  // CHECK:  %[[ABS:.+]] = spv.UMod %[[LABS]], %[[RABS]] : i32
-  // CHECK:  %[[POS:.+]] = spv.IEqual %[[LHS]], %[[LABS]] : i32
-  // CHECK:  %[[NEG:.+]] = spv.SNegate %[[ABS]] : i32
-  // CHECK:      %{{.+}} = spv.Select %[[POS]], %[[ABS]], %[[NEG]] : i1, i32
+  // CHECK: %[[LABS:.+]] = spirv.GL.SAbs %[[LHS]] : i32
+  // CHECK: %[[RABS:.+]] = spirv.GL.SAbs %[[RHS]] : i32
+  // CHECK:  %[[ABS:.+]] = spirv.UMod %[[LABS]], %[[RABS]] : i32
+  // CHECK:  %[[POS:.+]] = spirv.IEqual %[[LHS]], %[[LABS]] : i32
+  // CHECK:  %[[NEG:.+]] = spirv.SNegate %[[ABS]] : i32
+  // CHECK:      %{{.+}} = spirv.Select %[[POS]], %[[ABS]], %[[NEG]] : i1, i32
   %0 = arith.remsi %lhs, %rhs: index
   return
 }
@@ -76,11 +76,11 @@ func.func @index_scalar_srem(%lhs: index, %rhs: index) {
 // CHECK-LABEL: @int32_scalar_addui_carry
 // CHECK-SAME: (%[[LHS:.+]]: i32, %[[RHS:.+]]: i32)
 func.func @int32_scalar_addui_carry(%lhs: i32, %rhs: i32) -> (i32, i1) {
-  // CHECK-NEXT: %[[IAC:.+]] = spv.IAddCarry %[[LHS]], %[[RHS]] : !spv.struct<(i32, i32)>
-  // CHECK-DAG:  %[[SUM:.+]] = spv.CompositeExtract %[[IAC]][0 : i32] : !spv.struct<(i32, i32)>
-  // CHECK-DAG:  %[[C0:.+]]  = spv.CompositeExtract %[[IAC]][1 : i32] : !spv.struct<(i32, i32)>
-  // CHECK-DAG:  %[[ONE:.+]] = spv.Constant 1 : i32
-  // CHECK-NEXT: %[[C1:.+]]  = spv.IEqual %[[C0]], %[[ONE]] : i32
+  // CHECK-NEXT: %[[IAC:.+]] = spirv.IAddCarry %[[LHS]], %[[RHS]] : !spirv.struct<(i32, i32)>
+  // CHECK-DAG:  %[[SUM:.+]] = spirv.CompositeExtract %[[IAC]][0 : i32] : !spirv.struct<(i32, i32)>
+  // CHECK-DAG:  %[[C0:.+]]  = spirv.CompositeExtract %[[IAC]][1 : i32] : !spirv.struct<(i32, i32)>
+  // CHECK-DAG:  %[[ONE:.+]] = spirv.Constant 1 : i32
+  // CHECK-NEXT: %[[C1:.+]]  = spirv.IEqual %[[C0]], %[[ONE]] : i32
   // CHECK-NEXT: return %[[SUM]], %[[C1]] : i32, i1
   %sum, %carry = arith.addui_carry %lhs, %rhs: i32, i1
   return %sum, %carry : i32, i1
@@ -89,11 +89,11 @@ func.func @int32_scalar_addui_carry(%lhs: i32, %rhs: i32) -> (i32, i1) {
 // CHECK-LABEL: @int32_vector_addui_carry
 // CHECK-SAME: (%[[LHS:.+]]: vector<4xi32>, %[[RHS:.+]]: vector<4xi32>)
 func.func @int32_vector_addui_carry(%lhs: vector<4xi32>, %rhs: vector<4xi32>) -> (vector<4xi32>, vector<4xi1>) {
-  // CHECK-NEXT: %[[IAC:.+]] = spv.IAddCarry %[[LHS]], %[[RHS]] : !spv.struct<(vector<4xi32>, vector<4xi32>)>
-  // CHECK-DAG:  %[[SUM:.+]] = spv.CompositeExtract %[[IAC]][0 : i32] : !spv.struct<(vector<4xi32>, vector<4xi32>)>
-  // CHECK-DAG:  %[[C0:.+]]  = spv.CompositeExtract %[[IAC]][1 : i32] : !spv.struct<(vector<4xi32>, vector<4xi32>)>
-  // CHECK-DAG:  %[[ONE:.+]] = spv.Constant dense<1> : vector<4xi32>
-  // CHECK-NEXT: %[[C1:.+]]  = spv.IEqual %[[C0]], %[[ONE]] : vector<4xi32>
+  // CHECK-NEXT: %[[IAC:.+]] = spirv.IAddCarry %[[LHS]], %[[RHS]] : !spirv.struct<(vector<4xi32>, vector<4xi32>)>
+  // CHECK-DAG:  %[[SUM:.+]] = spirv.CompositeExtract %[[IAC]][0 : i32] : !spirv.struct<(vector<4xi32>, vector<4xi32>)>
+  // CHECK-DAG:  %[[C0:.+]]  = spirv.CompositeExtract %[[IAC]][1 : i32] : !spirv.struct<(vector<4xi32>, vector<4xi32>)>
+  // CHECK-DAG:  %[[ONE:.+]] = spirv.Constant dense<1> : vector<4xi32>
+  // CHECK-NEXT: %[[C1:.+]]  = spirv.IEqual %[[C0]], %[[ONE]] : vector<4xi32>
   // CHECK-NEXT: return %[[SUM]], %[[C1]] : vector<4xi32>, vector<4xi1>
   %sum, %carry = arith.addui_carry %lhs, %rhs: vector<4xi32>, vector<4xi1>
   return %sum, %carry : vector<4xi32>, vector<4xi1>
@@ -102,7 +102,7 @@ func.func @int32_vector_addui_carry(%lhs: vector<4xi32>, %rhs: vector<4xi32>) ->
 // Check float unary operation conversions.
 // CHECK-LABEL: @float32_unary_scalar
 func.func @float32_unary_scalar(%arg0: f32) {
-  // CHECK: spv.FNegate %{{.*}}: f32
+  // CHECK: spirv.FNegate %{{.*}}: f32
   %0 = arith.negf %arg0 : f32
   return
 }
@@ -110,15 +110,15 @@ func.func @float32_unary_scalar(%arg0: f32) {
 // Check float binary operation conversions.
 // CHECK-LABEL: @float32_binary_scalar
 func.func @float32_binary_scalar(%lhs: f32, %rhs: f32) {
-  // CHECK: spv.FAdd %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FAdd %{{.*}}, %{{.*}}: f32
   %0 = arith.addf %lhs, %rhs: f32
-  // CHECK: spv.FSub %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FSub %{{.*}}, %{{.*}}: f32
   %1 = arith.subf %lhs, %rhs: f32
-  // CHECK: spv.FMul %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FMul %{{.*}}, %{{.*}}: f32
   %2 = arith.mulf %lhs, %rhs: f32
-  // CHECK: spv.FDiv %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FDiv %{{.*}}, %{{.*}}: f32
   %3 = arith.divf %lhs, %rhs: f32
-  // CHECK: spv.FRem %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FRem %{{.*}}, %{{.*}}: f32
   %4 = arith.remf %lhs, %rhs: f32
   return
 }
@@ -126,9 +126,9 @@ func.func @float32_binary_scalar(%lhs: f32, %rhs: f32) {
 // Check int vector types.
 // CHECK-LABEL: @int_vector234
 func.func @int_vector234(%arg0: vector<2xi8>, %arg1: vector<4xi64>) {
-  // CHECK: spv.SDiv %{{.*}}, %{{.*}}: vector<2xi8>
+  // CHECK: spirv.SDiv %{{.*}}, %{{.*}}: vector<2xi8>
   %0 = arith.divsi %arg0, %arg0: vector<2xi8>
-  // CHECK: spv.UDiv %{{.*}}, %{{.*}}: vector<4xi64>
+  // CHECK: spirv.UDiv %{{.*}}, %{{.*}}: vector<4xi64>
   %1 = arith.divui %arg1, %arg1: vector<4xi64>
   return
 }
@@ -136,12 +136,12 @@ func.func @int_vector234(%arg0: vector<2xi8>, %arg1: vector<4xi64>) {
 // CHECK-LABEL: @vector_srem
 // CHECK-SAME: (%[[LHS:.+]]: vector<3xi16>, %[[RHS:.+]]: vector<3xi16>)
 func.func @vector_srem(%arg0: vector<3xi16>, %arg1: vector<3xi16>) {
-  // CHECK: %[[LABS:.+]] = spv.GL.SAbs %[[LHS]] : vector<3xi16>
-  // CHECK: %[[RABS:.+]] = spv.GL.SAbs %[[RHS]] : vector<3xi16>
-  // CHECK:  %[[ABS:.+]] = spv.UMod %[[LABS]], %[[RABS]] : vector<3xi16>
-  // CHECK:  %[[POS:.+]] = spv.IEqual %[[LHS]], %[[LABS]] : vector<3xi16>
-  // CHECK:  %[[NEG:.+]] = spv.SNegate %[[ABS]] : vector<3xi16>
-  // CHECK:      %{{.+}} = spv.Select %[[POS]], %[[ABS]], %[[NEG]] : vector<3xi1>, vector<3xi16>
+  // CHECK: %[[LABS:.+]] = spirv.GL.SAbs %[[LHS]] : vector<3xi16>
+  // CHECK: %[[RABS:.+]] = spirv.GL.SAbs %[[RHS]] : vector<3xi16>
+  // CHECK:  %[[ABS:.+]] = spirv.UMod %[[LABS]], %[[RABS]] : vector<3xi16>
+  // CHECK:  %[[POS:.+]] = spirv.IEqual %[[LHS]], %[[LABS]] : vector<3xi16>
+  // CHECK:  %[[NEG:.+]] = spirv.SNegate %[[ABS]] : vector<3xi16>
+  // CHECK:      %{{.+}} = spirv.Select %[[POS]], %[[ABS]], %[[NEG]] : vector<3xi1>, vector<3xi16>
   %0 = arith.remsi %arg0, %arg1: vector<3xi16>
   return
 }
@@ -149,16 +149,16 @@ func.func @vector_srem(%arg0: vector<3xi16>, %arg1: vector<3xi16>) {
 // Check float vector types.
 // CHECK-LABEL: @float_vector234
 func.func @float_vector234(%arg0: vector<2xf16>, %arg1: vector<3xf64>) {
-  // CHECK: spv.FAdd %{{.*}}, %{{.*}}: vector<2xf16>
+  // CHECK: spirv.FAdd %{{.*}}, %{{.*}}: vector<2xf16>
   %0 = arith.addf %arg0, %arg0: vector<2xf16>
-  // CHECK: spv.FMul %{{.*}}, %{{.*}}: vector<3xf64>
+  // CHECK: spirv.FMul %{{.*}}, %{{.*}}: vector<3xf64>
   %1 = arith.mulf %arg1, %arg1: vector<3xf64>
   return
 }
 
 // CHECK-LABEL: @one_elem_vector
 func.func @one_elem_vector(%arg0: vector<1xi32>) {
-  // CHECK: spv.IAdd %{{.+}}, %{{.+}}: i32
+  // CHECK: spirv.IAdd %{{.+}}, %{{.+}}: i32
   %0 = arith.addi %arg0, %arg0: vector<1xi32>
   return
 }
@@ -183,23 +183,23 @@ func.func @unsupported_2x2elem_vector(%arg0: vector<2x2xi32>) {
 
 // Check that types are converted to 32-bit when no special capabilities.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @int_vector23
 func.func @int_vector23(%arg0: vector<2xi8>, %arg1: vector<3xi16>) {
-  // CHECK: spv.SDiv %{{.*}}, %{{.*}}: vector<2xi32>
+  // CHECK: spirv.SDiv %{{.*}}, %{{.*}}: vector<2xi32>
   %0 = arith.divsi %arg0, %arg0: vector<2xi8>
-  // CHECK: spv.SDiv %{{.*}}, %{{.*}}: vector<3xi32>
+  // CHECK: spirv.SDiv %{{.*}}, %{{.*}}: vector<3xi32>
   %1 = arith.divsi %arg1, %arg1: vector<3xi16>
   return
 }
 
 // CHECK-LABEL: @float_scalar
 func.func @float_scalar(%arg0: f16, %arg1: f64) {
-  // CHECK: spv.FAdd %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FAdd %{{.*}}, %{{.*}}: f32
   %0 = arith.addf %arg0, %arg0: f16
-  // CHECK: spv.FMul %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FMul %{{.*}}, %{{.*}}: f32
   %1 = arith.mulf %arg1, %arg1: f64
   return
 }
@@ -211,7 +211,7 @@ func.func @float_scalar(%arg0: f16, %arg1: f64) {
 // Check that types are converted to 32-bit when no special capabilities that
 // are not supported.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 func.func @int_vector4_invalid(%arg0: vector<4xi64>) {
@@ -229,71 +229,71 @@ func.func @int_vector4_invalid(%arg0: vector<4xi64>) {
 //===----------------------------------------------------------------------===//
 
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @bitwise_scalar
 func.func @bitwise_scalar(%arg0 : i32, %arg1 : i32) {
-  // CHECK: spv.BitwiseAnd
+  // CHECK: spirv.BitwiseAnd
   %0 = arith.andi %arg0, %arg1 : i32
-  // CHECK: spv.BitwiseOr
+  // CHECK: spirv.BitwiseOr
   %1 = arith.ori %arg0, %arg1 : i32
-  // CHECK: spv.BitwiseXor
+  // CHECK: spirv.BitwiseXor
   %2 = arith.xori %arg0, %arg1 : i32
   return
 }
 
 // CHECK-LABEL: @bitwise_vector
 func.func @bitwise_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>) {
-  // CHECK: spv.BitwiseAnd
+  // CHECK: spirv.BitwiseAnd
   %0 = arith.andi %arg0, %arg1 : vector<4xi32>
-  // CHECK: spv.BitwiseOr
+  // CHECK: spirv.BitwiseOr
   %1 = arith.ori %arg0, %arg1 : vector<4xi32>
-  // CHECK: spv.BitwiseXor
+  // CHECK: spirv.BitwiseXor
   %2 = arith.xori %arg0, %arg1 : vector<4xi32>
   return
 }
 
 // CHECK-LABEL: @logical_scalar
 func.func @logical_scalar(%arg0 : i1, %arg1 : i1) {
-  // CHECK: spv.LogicalAnd
+  // CHECK: spirv.LogicalAnd
   %0 = arith.andi %arg0, %arg1 : i1
-  // CHECK: spv.LogicalOr
+  // CHECK: spirv.LogicalOr
   %1 = arith.ori %arg0, %arg1 : i1
-  // CHECK: spv.LogicalNotEqual
+  // CHECK: spirv.LogicalNotEqual
   %2 = arith.xori %arg0, %arg1 : i1
   return
 }
 
 // CHECK-LABEL: @logical_vector
 func.func @logical_vector(%arg0 : vector<4xi1>, %arg1 : vector<4xi1>) {
-  // CHECK: spv.LogicalAnd
+  // CHECK: spirv.LogicalAnd
   %0 = arith.andi %arg0, %arg1 : vector<4xi1>
-  // CHECK: spv.LogicalOr
+  // CHECK: spirv.LogicalOr
   %1 = arith.ori %arg0, %arg1 : vector<4xi1>
-  // CHECK: spv.LogicalNotEqual
+  // CHECK: spirv.LogicalNotEqual
   %2 = arith.xori %arg0, %arg1 : vector<4xi1>
   return
 }
 
 // CHECK-LABEL: @shift_scalar
 func.func @shift_scalar(%arg0 : i32, %arg1 : i32) {
-  // CHECK: spv.ShiftLeftLogical
+  // CHECK: spirv.ShiftLeftLogical
   %0 = arith.shli %arg0, %arg1 : i32
-  // CHECK: spv.ShiftRightArithmetic
+  // CHECK: spirv.ShiftRightArithmetic
   %1 = arith.shrsi %arg0, %arg1 : i32
-  // CHECK: spv.ShiftRightLogical
+  // CHECK: spirv.ShiftRightLogical
   %2 = arith.shrui %arg0, %arg1 : i32
   return
 }
 
 // CHECK-LABEL: @shift_vector
 func.func @shift_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>) {
-  // CHECK: spv.ShiftLeftLogical
+  // CHECK: spirv.ShiftLeftLogical
   %0 = arith.shli %arg0, %arg1 : vector<4xi32>
-  // CHECK: spv.ShiftRightArithmetic
+  // CHECK: spirv.ShiftRightArithmetic
   %1 = arith.shrsi %arg0, %arg1 : vector<4xi32>
-  // CHECK: spv.ShiftRightLogical
+  // CHECK: spirv.ShiftRightLogical
   %2 = arith.shrui %arg0, %arg1 : vector<4xi32>
   return
 }
@@ -307,34 +307,34 @@ func.func @shift_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>) {
 //===----------------------------------------------------------------------===//
 
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @cmpf
 func.func @cmpf(%arg0 : f32, %arg1 : f32) {
-  // CHECK: spv.FOrdEqual
+  // CHECK: spirv.FOrdEqual
   %1 = arith.cmpf oeq, %arg0, %arg1 : f32
-  // CHECK: spv.FOrdGreaterThan
+  // CHECK: spirv.FOrdGreaterThan
   %2 = arith.cmpf ogt, %arg0, %arg1 : f32
-  // CHECK: spv.FOrdGreaterThanEqual
+  // CHECK: spirv.FOrdGreaterThanEqual
   %3 = arith.cmpf oge, %arg0, %arg1 : f32
-  // CHECK: spv.FOrdLessThan
+  // CHECK: spirv.FOrdLessThan
   %4 = arith.cmpf olt, %arg0, %arg1 : f32
-  // CHECK: spv.FOrdLessThanEqual
+  // CHECK: spirv.FOrdLessThanEqual
   %5 = arith.cmpf ole, %arg0, %arg1 : f32
-  // CHECK: spv.FOrdNotEqual
+  // CHECK: spirv.FOrdNotEqual
   %6 = arith.cmpf one, %arg0, %arg1 : f32
-  // CHECK: spv.FUnordEqual
+  // CHECK: spirv.FUnordEqual
   %7 = arith.cmpf ueq, %arg0, %arg1 : f32
-  // CHECK: spv.FUnordGreaterThan
+  // CHECK: spirv.FUnordGreaterThan
   %8 = arith.cmpf ugt, %arg0, %arg1 : f32
-  // CHECK: spv.FUnordGreaterThanEqual
+  // CHECK: spirv.FUnordGreaterThanEqual
   %9 = arith.cmpf uge, %arg0, %arg1 : f32
-  // CHECK: spv.FUnordLessThan
+  // CHECK: spirv.FUnordLessThan
   %10 = arith.cmpf ult, %arg0, %arg1 : f32
   // CHECK: FUnordLessThanEqual
   %11 = arith.cmpf ule, %arg0, %arg1 : f32
-  // CHECK: spv.FUnordNotEqual
+  // CHECK: spirv.FUnordNotEqual
   %12 = arith.cmpf une, %arg0, %arg1 : f32
   return
 }
@@ -343,16 +343,16 @@ func.func @cmpf(%arg0 : f32, %arg1 : f32) {
 
 // -----
 
-// With Kernel capability, we can convert NaN check to spv.Ordered/spv.Unordered.
+// With Kernel capability, we can convert NaN check to spirv.Ordered/spirv.Unordered.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [Kernel], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Kernel], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @cmpf
 func.func @cmpf(%arg0 : f32, %arg1 : f32) {
-  // CHECK: spv.Ordered
+  // CHECK: spirv.Ordered
   %0 = arith.cmpf ord, %arg0, %arg1 : f32
-  // CHECK: spv.Unordered
+  // CHECK: spirv.Unordered
   %1 = arith.cmpf uno, %arg0, %arg1 : f32
   return
 }
@@ -361,23 +361,23 @@ func.func @cmpf(%arg0 : f32, %arg1 : f32) {
 
 // -----
 
-// Without Kernel capability, we need to convert NaN check to spv.IsNan.
+// Without Kernel capability, we need to convert NaN check to spirv.IsNan.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @cmpf
 // CHECK-SAME: %[[LHS:.+]]: f32, %[[RHS:.+]]: f32
 func.func @cmpf(%arg0 : f32, %arg1 : f32) {
-  // CHECK:      %[[LHS_NAN:.+]] = spv.IsNan %[[LHS]] : f32
-  // CHECK-NEXT: %[[RHS_NAN:.+]] = spv.IsNan %[[RHS]] : f32
-  // CHECK-NEXT: %[[OR:.+]] = spv.LogicalOr %[[LHS_NAN]], %[[RHS_NAN]] : i1
-  // CHECK-NEXT: %{{.+}} = spv.LogicalNot %[[OR]] : i1
+  // CHECK:      %[[LHS_NAN:.+]] = spirv.IsNan %[[LHS]] : f32
+  // CHECK-NEXT: %[[RHS_NAN:.+]] = spirv.IsNan %[[RHS]] : f32
+  // CHECK-NEXT: %[[OR:.+]] = spirv.LogicalOr %[[LHS_NAN]], %[[RHS_NAN]] : i1
+  // CHECK-NEXT: %{{.+}} = spirv.LogicalNot %[[OR]] : i1
   %0 = arith.cmpf ord, %arg0, %arg1 : f32
 
-  // CHECK-NEXT: %[[LHS_NAN:.+]] = spv.IsNan %[[LHS]] : f32
-  // CHECK-NEXT: %[[RHS_NAN:.+]] = spv.IsNan %[[RHS]] : f32
-  // CHECK-NEXT: %{{.+}} = spv.LogicalOr %[[LHS_NAN]], %[[RHS_NAN]] : i1
+  // CHECK-NEXT: %[[LHS_NAN:.+]] = spirv.IsNan %[[LHS]] : f32
+  // CHECK-NEXT: %[[RHS_NAN:.+]] = spirv.IsNan %[[RHS]] : f32
+  // CHECK-NEXT: %{{.+}} = spirv.LogicalOr %[[LHS_NAN]], %[[RHS_NAN]] : i1
   %1 = arith.cmpf uno, %arg0, %arg1 : f32
   return
 }
@@ -391,99 +391,99 @@ func.func @cmpf(%arg0 : f32, %arg1 : f32) {
 //===----------------------------------------------------------------------===//
 
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @cmpi
 func.func @cmpi(%arg0 : i32, %arg1 : i32) {
-  // CHECK: spv.IEqual
+  // CHECK: spirv.IEqual
   %0 = arith.cmpi eq, %arg0, %arg1 : i32
-  // CHECK: spv.INotEqual
+  // CHECK: spirv.INotEqual
   %1 = arith.cmpi ne, %arg0, %arg1 : i32
-  // CHECK: spv.SLessThan
+  // CHECK: spirv.SLessThan
   %2 = arith.cmpi slt, %arg0, %arg1 : i32
-  // CHECK: spv.SLessThanEqual
+  // CHECK: spirv.SLessThanEqual
   %3 = arith.cmpi sle, %arg0, %arg1 : i32
-  // CHECK: spv.SGreaterThan
+  // CHECK: spirv.SGreaterThan
   %4 = arith.cmpi sgt, %arg0, %arg1 : i32
-  // CHECK: spv.SGreaterThanEqual
+  // CHECK: spirv.SGreaterThanEqual
   %5 = arith.cmpi sge, %arg0, %arg1 : i32
-  // CHECK: spv.ULessThan
+  // CHECK: spirv.ULessThan
   %6 = arith.cmpi ult, %arg0, %arg1 : i32
-  // CHECK: spv.ULessThanEqual
+  // CHECK: spirv.ULessThanEqual
   %7 = arith.cmpi ule, %arg0, %arg1 : i32
-  // CHECK: spv.UGreaterThan
+  // CHECK: spirv.UGreaterThan
   %8 = arith.cmpi ugt, %arg0, %arg1 : i32
-  // CHECK: spv.UGreaterThanEqual
+  // CHECK: spirv.UGreaterThanEqual
   %9 = arith.cmpi uge, %arg0, %arg1 : i32
   return
 }
 
 // CHECK-LABEL: @vec1cmpi
 func.func @vec1cmpi(%arg0 : vector<1xi32>, %arg1 : vector<1xi32>) {
-  // CHECK: spv.ULessThan
+  // CHECK: spirv.ULessThan
   %0 = arith.cmpi ult, %arg0, %arg1 : vector<1xi32>
-  // CHECK: spv.SGreaterThan
+  // CHECK: spirv.SGreaterThan
   %1 = arith.cmpi sgt, %arg0, %arg1 : vector<1xi32>
   return
 }
 
 // CHECK-LABEL: @boolcmpi_equality
 func.func @boolcmpi_equality(%arg0 : i1, %arg1 : i1) {
-  // CHECK: spv.LogicalEqual
+  // CHECK: spirv.LogicalEqual
   %0 = arith.cmpi eq, %arg0, %arg1 : i1
-  // CHECK: spv.LogicalNotEqual
+  // CHECK: spirv.LogicalNotEqual
   %1 = arith.cmpi ne, %arg0, %arg1 : i1
   return
 }
 
 // CHECK-LABEL: @boolcmpi_unsigned
 func.func @boolcmpi_unsigned(%arg0 : i1, %arg1 : i1) {
-  // CHECK-COUNT-2: spv.Select
-  // CHECK: spv.UGreaterThanEqual
+  // CHECK-COUNT-2: spirv.Select
+  // CHECK: spirv.UGreaterThanEqual
   %0 = arith.cmpi uge, %arg0, %arg1 : i1
-  // CHECK-COUNT-2: spv.Select
-  // CHECK: spv.ULessThan
+  // CHECK-COUNT-2: spirv.Select
+  // CHECK: spirv.ULessThan
   %1 = arith.cmpi ult, %arg0, %arg1 : i1
   return
 }
 
 // CHECK-LABEL: @vec1boolcmpi_equality
 func.func @vec1boolcmpi_equality(%arg0 : vector<1xi1>, %arg1 : vector<1xi1>) {
-  // CHECK: spv.LogicalEqual
+  // CHECK: spirv.LogicalEqual
   %0 = arith.cmpi eq, %arg0, %arg1 : vector<1xi1>
-  // CHECK: spv.LogicalNotEqual
+  // CHECK: spirv.LogicalNotEqual
   %1 = arith.cmpi ne, %arg0, %arg1 : vector<1xi1>
   return
 }
 
 // CHECK-LABEL: @vec1boolcmpi_unsigned
 func.func @vec1boolcmpi_unsigned(%arg0 : vector<1xi1>, %arg1 : vector<1xi1>) {
-  // CHECK-COUNT-2: spv.Select
-  // CHECK: spv.UGreaterThanEqual
+  // CHECK-COUNT-2: spirv.Select
+  // CHECK: spirv.UGreaterThanEqual
   %0 = arith.cmpi uge, %arg0, %arg1 : vector<1xi1>
-  // CHECK-COUNT-2: spv.Select
-  // CHECK: spv.ULessThan
+  // CHECK-COUNT-2: spirv.Select
+  // CHECK: spirv.ULessThan
   %1 = arith.cmpi ult, %arg0, %arg1 : vector<1xi1>
   return
 }
 
 // CHECK-LABEL: @vecboolcmpi_equality
 func.func @vecboolcmpi_equality(%arg0 : vector<4xi1>, %arg1 : vector<4xi1>) {
-  // CHECK: spv.LogicalEqual
+  // CHECK: spirv.LogicalEqual
   %0 = arith.cmpi eq, %arg0, %arg1 : vector<4xi1>
-  // CHECK: spv.LogicalNotEqual
+  // CHECK: spirv.LogicalNotEqual
   %1 = arith.cmpi ne, %arg0, %arg1 : vector<4xi1>
   return
 }
 
 // CHECK-LABEL: @vecboolcmpi_unsigned
 func.func @vecboolcmpi_unsigned(%arg0 : vector<3xi1>, %arg1 : vector<3xi1>) {
-  // CHECK-COUNT-2: spv.Select
-  // CHECK: spv.UGreaterThanEqual
+  // CHECK-COUNT-2: spirv.Select
+  // CHECK: spirv.UGreaterThanEqual
   %0 = arith.cmpi uge, %arg0, %arg1 : vector<3xi1>
-  // CHECK-COUNT-2: spv.Select
-  // CHECK: spv.ULessThan
+  // CHECK-COUNT-2: spirv.Select
+  // CHECK: spirv.ULessThan
   %1 = arith.cmpi ult, %arg0, %arg1 : vector<3xi1>
   return
 }
@@ -498,70 +498,70 @@ func.func @vecboolcmpi_unsigned(%arg0 : vector<3xi1>, %arg1 : vector<3xi1>) {
 //===----------------------------------------------------------------------===//
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @constant
 func.func @constant() {
-  // CHECK: spv.Constant true
+  // CHECK: spirv.Constant true
   %0 = arith.constant true
-  // CHECK: spv.Constant 42 : i32
+  // CHECK: spirv.Constant 42 : i32
   %1 = arith.constant 42 : i32
-  // CHECK: spv.Constant 5.000000e-01 : f32
+  // CHECK: spirv.Constant 5.000000e-01 : f32
   %2 = arith.constant 0.5 : f32
-  // CHECK: spv.Constant dense<[2, 3]> : vector<2xi32>
+  // CHECK: spirv.Constant dense<[2, 3]> : vector<2xi32>
   %3 = arith.constant dense<[2, 3]> : vector<2xi32>
-  // CHECK: spv.Constant 1 : i32
+  // CHECK: spirv.Constant 1 : i32
   %4 = arith.constant 1 : index
-  // CHECK: spv.Constant dense<1> : tensor<6xi32> : !spv.array<6 x i32>
+  // CHECK: spirv.Constant dense<1> : tensor<6xi32> : !spirv.array<6 x i32>
   %5 = arith.constant dense<1> : tensor<2x3xi32>
-  // CHECK: spv.Constant dense<1.000000e+00> : tensor<6xf32> : !spv.array<6 x f32>
+  // CHECK: spirv.Constant dense<1.000000e+00> : tensor<6xf32> : !spirv.array<6 x f32>
   %6 = arith.constant dense<1.0> : tensor<2x3xf32>
-  // CHECK: spv.Constant dense<{{\[}}1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf32> : !spv.array<6 x f32>
+  // CHECK: spirv.Constant dense<{{\[}}1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf32> : !spirv.array<6 x f32>
   %7 = arith.constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32>
-  // CHECK: spv.Constant dense<{{\[}}1, 2, 3, 4, 5, 6]> : tensor<6xi32> : !spv.array<6 x i32>
+  // CHECK: spirv.Constant dense<{{\[}}1, 2, 3, 4, 5, 6]> : tensor<6xi32> : !spirv.array<6 x i32>
   %8 = arith.constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32>
-  // CHECK: spv.Constant dense<{{\[}}1, 2, 3, 4, 5, 6]> : tensor<6xi32> : !spv.array<6 x i32>
+  // CHECK: spirv.Constant dense<{{\[}}1, 2, 3, 4, 5, 6]> : tensor<6xi32> : !spirv.array<6 x i32>
   %9 =  arith.constant dense<[[1, 2], [3, 4], [5, 6]]> : tensor<3x2xi32>
-  // CHECK: spv.Constant dense<{{\[}}1, 2, 3, 4, 5, 6]> : tensor<6xi32> : !spv.array<6 x i32>
+  // CHECK: spirv.Constant dense<{{\[}}1, 2, 3, 4, 5, 6]> : tensor<6xi32> : !spirv.array<6 x i32>
   %10 =  arith.constant dense<[1, 2, 3, 4, 5, 6]> : tensor<6xi32>
   return
 }
 
 // CHECK-LABEL: @constant_16bit
 func.func @constant_16bit() {
-  // CHECK: spv.Constant 4 : i16
+  // CHECK: spirv.Constant 4 : i16
   %0 = arith.constant 4 : i16
-  // CHECK: spv.Constant 5.000000e+00 : f16
+  // CHECK: spirv.Constant 5.000000e+00 : f16
   %1 = arith.constant 5.0 : f16
-  // CHECK: spv.Constant dense<[2, 3]> : vector<2xi16>
+  // CHECK: spirv.Constant dense<[2, 3]> : vector<2xi16>
   %2 = arith.constant dense<[2, 3]> : vector<2xi16>
-  // CHECK: spv.Constant dense<4.000000e+00> : tensor<5xf16> : !spv.array<5 x f16>
+  // CHECK: spirv.Constant dense<4.000000e+00> : tensor<5xf16> : !spirv.array<5 x f16>
   %3 = arith.constant dense<4.0> : tensor<5xf16>
   return
 }
 
 // CHECK-LABEL: @constant_64bit
 func.func @constant_64bit() {
-  // CHECK: spv.Constant 4 : i64
+  // CHECK: spirv.Constant 4 : i64
   %0 = arith.constant 4 : i64
-  // CHECK: spv.Constant 5.000000e+00 : f64
+  // CHECK: spirv.Constant 5.000000e+00 : f64
   %1 = arith.constant 5.0 : f64
-  // CHECK: spv.Constant dense<[2, 3]> : vector<2xi64>
+  // CHECK: spirv.Constant dense<[2, 3]> : vector<2xi64>
   %2 = arith.constant dense<[2, 3]> : vector<2xi64>
-  // CHECK: spv.Constant dense<4.000000e+00> : tensor<5xf64> : !spv.array<5 x f64>
+  // CHECK: spirv.Constant dense<4.000000e+00> : tensor<5xf64> : !spirv.array<5 x f64>
   %3 = arith.constant dense<4.0> : tensor<5xf64>
   return
 }
 
 // CHECK-LABEL: @constant_size1
 func.func @constant_size1() {
-  // CHECK: spv.Constant true
+  // CHECK: spirv.Constant true
   %0 = arith.constant dense<true> : tensor<1xi1>
-  // CHECK: spv.Constant 4 : i64
+  // CHECK: spirv.Constant 4 : i64
   %1 = arith.constant dense<4> : vector<1xi64>
-  // CHECK: spv.Constant 5.000000e+00 : f64
+  // CHECK: spirv.Constant 5.000000e+00 : f64
   %2 = arith.constant dense<5.0> : tensor<1xf64>
   return
 }
@@ -572,72 +572,72 @@ func.func @constant_size1() {
 
 // Check that constants are converted to 32-bit when no special capability.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @constant_16bit
 func.func @constant_16bit() {
-  // CHECK: spv.Constant 4 : i32
+  // CHECK: spirv.Constant 4 : i32
   %0 = arith.constant 4 : i16
-  // CHECK: spv.Constant 5.000000e+00 : f32
+  // CHECK: spirv.Constant 5.000000e+00 : f32
   %1 = arith.constant 5.0 : f16
-  // CHECK: spv.Constant dense<[2, 3]> : vector<2xi32>
+  // CHECK: spirv.Constant dense<[2, 3]> : vector<2xi32>
   %2 = arith.constant dense<[2, 3]> : vector<2xi16>
-  // CHECK: spv.Constant dense<4.000000e+00> : tensor<5xf32> : !spv.array<5 x f32>
+  // CHECK: spirv.Constant dense<4.000000e+00> : tensor<5xf32> : !spirv.array<5 x f32>
   %3 = arith.constant dense<4.0> : tensor<5xf16>
-  // CHECK: spv.Constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00]> : tensor<4xf32> : !spv.array<4 x f32>
+  // CHECK: spirv.Constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00]> : tensor<4xf32> : !spirv.array<4 x f32>
   %4 = arith.constant dense<[[1.0, 2.0], [3.0, 4.0]]> : tensor<2x2xf16>
   return
 }
 
 // CHECK-LABEL: @constant_64bit
 func.func @constant_64bit() {
-  // CHECK: spv.Constant 4 : i32
+  // CHECK: spirv.Constant 4 : i32
   %0 = arith.constant 4 : i64
-  // CHECK: spv.Constant 5.000000e+00 : f32
+  // CHECK: spirv.Constant 5.000000e+00 : f32
   %1 = arith.constant 5.0 : f64
-  // CHECK: spv.Constant dense<[2, 3]> : vector<2xi32>
+  // CHECK: spirv.Constant dense<[2, 3]> : vector<2xi32>
   %2 = arith.constant dense<[2, 3]> : vector<2xi64>
-  // CHECK: spv.Constant dense<4.000000e+00> : tensor<5xf32> : !spv.array<5 x f32>
+  // CHECK: spirv.Constant dense<4.000000e+00> : tensor<5xf32> : !spirv.array<5 x f32>
   %3 = arith.constant dense<4.0> : tensor<5xf64>
-  // CHECK: spv.Constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00]> : tensor<4xf32> : !spv.array<4 x f32>
+  // CHECK: spirv.Constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00]> : tensor<4xf32> : !spirv.array<4 x f32>
   %4 = arith.constant dense<[[1.0, 2.0], [3.0, 4.0]]> : tensor<2x2xf16>
   return
 }
 
 // CHECK-LABEL: @constant_size1
 func.func @constant_size1() {
-  // CHECK: spv.Constant 4 : i32
+  // CHECK: spirv.Constant 4 : i32
   %0 = arith.constant dense<4> : vector<1xi64>
-  // CHECK: spv.Constant 5.000000e+00 : f32
+  // CHECK: spirv.Constant 5.000000e+00 : f32
   %1 = arith.constant dense<5.0> : tensor<1xf64>
   return
 }
 
 // CHECK-LABEL: @corner_cases
 func.func @corner_cases() {
-  // CHECK: %{{.*}} = spv.Constant -1 : i32
+  // CHECK: %{{.*}} = spirv.Constant -1 : i32
   %0 = arith.constant 4294967295  : i64 // 2^32 - 1
-  // CHECK: %{{.*}} = spv.Constant 2147483647 : i32
+  // CHECK: %{{.*}} = spirv.Constant 2147483647 : i32
   %1 = arith.constant 2147483647  : i64 // 2^31 - 1
-  // CHECK: %{{.*}} = spv.Constant -2147483648 : i32
+  // CHECK: %{{.*}} = spirv.Constant -2147483648 : i32
   %2 = arith.constant 2147483648  : i64 // 2^31
-  // CHECK: %{{.*}} = spv.Constant -2147483648 : i32
+  // CHECK: %{{.*}} = spirv.Constant -2147483648 : i32
   %3 = arith.constant -2147483648 : i64 // -2^31
 
-  // CHECK: %{{.*}} = spv.Constant -1 : i32
+  // CHECK: %{{.*}} = spirv.Constant -1 : i32
   %5 = arith.constant -1 : i64
-  // CHECK: %{{.*}} = spv.Constant -2 : i32
+  // CHECK: %{{.*}} = spirv.Constant -2 : i32
   %6 = arith.constant -2 : i64
-  // CHECK: %{{.*}} = spv.Constant -1 : i32
+  // CHECK: %{{.*}} = spirv.Constant -1 : i32
   %7 = arith.constant -1 : index
-  // CHECK: %{{.*}} = spv.Constant -2 : i32
+  // CHECK: %{{.*}} = spirv.Constant -2 : i32
   %8 = arith.constant -2 : index
 
 
-  // CHECK: spv.Constant false
+  // CHECK: spirv.Constant false
   %9 = arith.constant false
-  // CHECK: spv.Constant true
+  // CHECK: spirv.Constant true
   %10 = arith.constant true
 
   return
@@ -663,244 +663,244 @@ func.func @unsupported_cases() {
 //===----------------------------------------------------------------------===//
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: index_cast1
 func.func @index_cast1(%arg0: i16) {
-  // CHECK: spv.SConvert %{{.+}} : i16 to i32
+  // CHECK: spirv.SConvert %{{.+}} : i16 to i32
   %0 = arith.index_cast %arg0 : i16 to index
   return
 }
 
 // CHECK-LABEL: index_cast2
 func.func @index_cast2(%arg0: index) {
-  // CHECK: spv.SConvert %{{.+}} : i32 to i16
+  // CHECK: spirv.SConvert %{{.+}} : i32 to i16
   %0 = arith.index_cast %arg0 : index to i16
   return
 }
 
 // CHECK-LABEL: index_cast3
 func.func @index_cast3(%arg0: i32) {
-  // CHECK-NOT: spv.SConvert
+  // CHECK-NOT: spirv.SConvert
   %0 = arith.index_cast %arg0 : i32 to index
   return
 }
 
 // CHECK-LABEL: index_cast4
 func.func @index_cast4(%arg0: index) {
-  // CHECK-NOT: spv.SConvert
+  // CHECK-NOT: spirv.SConvert
   %0 = arith.index_cast %arg0 : index to i32
   return
 }
 
 // CHECK-LABEL: @bit_cast
 func.func @bit_cast(%arg0: vector<2xf32>, %arg1: i64) {
-  // CHECK: spv.Bitcast %{{.+}} : vector<2xf32> to vector<2xi32>
+  // CHECK: spirv.Bitcast %{{.+}} : vector<2xf32> to vector<2xi32>
   %0 = arith.bitcast %arg0 : vector<2xf32> to vector<2xi32>
-  // CHECK: spv.Bitcast %{{.+}} : i64 to f64
+  // CHECK: spirv.Bitcast %{{.+}} : i64 to f64
   %1 = arith.bitcast %arg1 : i64 to f64
   return
 }
 
 // CHECK-LABEL: @fpext1
 func.func @fpext1(%arg0: f16) -> f64 {
-  // CHECK: spv.FConvert %{{.*}} : f16 to f64
+  // CHECK: spirv.FConvert %{{.*}} : f16 to f64
   %0 = arith.extf %arg0 : f16 to f64
   return %0 : f64
 }
 
 // CHECK-LABEL: @fpext2
 func.func @fpext2(%arg0 : f32) -> f64 {
-  // CHECK: spv.FConvert %{{.*}} : f32 to f64
+  // CHECK: spirv.FConvert %{{.*}} : f32 to f64
   %0 = arith.extf %arg0 : f32 to f64
   return %0 : f64
 }
 
 // CHECK-LABEL: @fptrunc1
 func.func @fptrunc1(%arg0 : f64) -> f16 {
-  // CHECK: spv.FConvert %{{.*}} : f64 to f16
+  // CHECK: spirv.FConvert %{{.*}} : f64 to f16
   %0 = arith.truncf %arg0 : f64 to f16
   return %0 : f16
 }
 
 // CHECK-LABEL: @fptrunc2
 func.func @fptrunc2(%arg0: f32) -> f16 {
-  // CHECK: spv.FConvert %{{.*}} : f32 to f16
+  // CHECK: spirv.FConvert %{{.*}} : f32 to f16
   %0 = arith.truncf %arg0 : f32 to f16
   return %0 : f16
 }
 
 // CHECK-LABEL: @sitofp1
 func.func @sitofp1(%arg0 : i32) -> f32 {
-  // CHECK: spv.ConvertSToF %{{.*}} : i32 to f32
+  // CHECK: spirv.ConvertSToF %{{.*}} : i32 to f32
   %0 = arith.sitofp %arg0 : i32 to f32
   return %0 : f32
 }
 
 // CHECK-LABEL: @sitofp2
 func.func @sitofp2(%arg0 : i64) -> f64 {
-  // CHECK: spv.ConvertSToF %{{.*}} : i64 to f64
+  // CHECK: spirv.ConvertSToF %{{.*}} : i64 to f64
   %0 = arith.sitofp %arg0 : i64 to f64
   return %0 : f64
 }
 
 // CHECK-LABEL: @uitofp_i16_f32
 func.func @uitofp_i16_f32(%arg0: i16) -> f32 {
-  // CHECK: spv.ConvertUToF %{{.*}} : i16 to f32
+  // CHECK: spirv.ConvertUToF %{{.*}} : i16 to f32
   %0 = arith.uitofp %arg0 : i16 to f32
   return %0 : f32
 }
 
 // CHECK-LABEL: @uitofp_i32_f32
 func.func @uitofp_i32_f32(%arg0 : i32) -> f32 {
-  // CHECK: spv.ConvertUToF %{{.*}} : i32 to f32
+  // CHECK: spirv.ConvertUToF %{{.*}} : i32 to f32
   %0 = arith.uitofp %arg0 : i32 to f32
   return %0 : f32
 }
 
 // CHECK-LABEL: @uitofp_i1_f32
 func.func @uitofp_i1_f32(%arg0 : i1) -> f32 {
-  // CHECK: %[[ZERO:.+]] = spv.Constant 0.000000e+00 : f32
-  // CHECK: %[[ONE:.+]] = spv.Constant 1.000000e+00 : f32
-  // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : i1, f32
+  // CHECK: %[[ZERO:.+]] = spirv.Constant 0.000000e+00 : f32
+  // CHECK: %[[ONE:.+]] = spirv.Constant 1.000000e+00 : f32
+  // CHECK: spirv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : i1, f32
   %0 = arith.uitofp %arg0 : i1 to f32
   return %0 : f32
 }
 
 // CHECK-LABEL: @uitofp_i1_f64
 func.func @uitofp_i1_f64(%arg0 : i1) -> f64 {
-  // CHECK: %[[ZERO:.+]] = spv.Constant 0.000000e+00 : f64
-  // CHECK: %[[ONE:.+]] = spv.Constant 1.000000e+00 : f64
-  // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : i1, f64
+  // CHECK: %[[ZERO:.+]] = spirv.Constant 0.000000e+00 : f64
+  // CHECK: %[[ONE:.+]] = spirv.Constant 1.000000e+00 : f64
+  // CHECK: spirv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : i1, f64
   %0 = arith.uitofp %arg0 : i1 to f64
   return %0 : f64
 }
 
 // CHECK-LABEL: @uitofp_vec_i1_f32
 func.func @uitofp_vec_i1_f32(%arg0 : vector<4xi1>) -> vector<4xf32> {
-  // CHECK: %[[ZERO:.+]] = spv.Constant dense<0.000000e+00> : vector<4xf32>
-  // CHECK: %[[ONE:.+]] = spv.Constant dense<1.000000e+00> : vector<4xf32>
-  // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xf32>
+  // CHECK: %[[ZERO:.+]] = spirv.Constant dense<0.000000e+00> : vector<4xf32>
+  // CHECK: %[[ONE:.+]] = spirv.Constant dense<1.000000e+00> : vector<4xf32>
+  // CHECK: spirv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xf32>
   %0 = arith.uitofp %arg0 : vector<4xi1> to vector<4xf32>
   return %0 : vector<4xf32>
 }
 
 // CHECK-LABEL: @uitofp_vec_i1_f64
-spv.func @uitofp_vec_i1_f64(%arg0: vector<4xi1>) -> vector<4xf64> "None" {
-  // CHECK: %[[ZERO:.+]] = spv.Constant dense<0.000000e+00> : vector<4xf64>
-  // CHECK: %[[ONE:.+]] = spv.Constant dense<1.000000e+00> : vector<4xf64>
-  // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xf64>
-  %0 = spv.Constant dense<0.000000e+00> : vector<4xf64>
-  %1 = spv.Constant dense<1.000000e+00> : vector<4xf64>
-  %2 = spv.Select %arg0, %1, %0 : vector<4xi1>, vector<4xf64>
-  spv.ReturnValue %2 : vector<4xf64>
+spirv.func @uitofp_vec_i1_f64(%arg0: vector<4xi1>) -> vector<4xf64> "None" {
+  // CHECK: %[[ZERO:.+]] = spirv.Constant dense<0.000000e+00> : vector<4xf64>
+  // CHECK: %[[ONE:.+]] = spirv.Constant dense<1.000000e+00> : vector<4xf64>
+  // CHECK: spirv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xf64>
+  %0 = spirv.Constant dense<0.000000e+00> : vector<4xf64>
+  %1 = spirv.Constant dense<1.000000e+00> : vector<4xf64>
+  %2 = spirv.Select %arg0, %1, %0 : vector<4xi1>, vector<4xf64>
+  spirv.ReturnValue %2 : vector<4xf64>
 }
 
 // CHECK-LABEL: @sexti1
 func.func @sexti1(%arg0: i16) -> i64 {
-  // CHECK: spv.SConvert %{{.*}} : i16 to i64
+  // CHECK: spirv.SConvert %{{.*}} : i16 to i64
   %0 = arith.extsi %arg0 : i16 to i64
   return %0 : i64
 }
 
 // CHECK-LABEL: @sexti2
 func.func @sexti2(%arg0 : i32) -> i64 {
-  // CHECK: spv.SConvert %{{.*}} : i32 to i64
+  // CHECK: spirv.SConvert %{{.*}} : i32 to i64
   %0 = arith.extsi %arg0 : i32 to i64
   return %0 : i64
 }
 
 // CHECK-LABEL: @zexti1
 func.func @zexti1(%arg0: i16) -> i64 {
-  // CHECK: spv.UConvert %{{.*}} : i16 to i64
+  // CHECK: spirv.UConvert %{{.*}} : i16 to i64
   %0 = arith.extui %arg0 : i16 to i64
   return %0 : i64
 }
 
 // CHECK-LABEL: @zexti2
 func.func @zexti2(%arg0 : i32) -> i64 {
-  // CHECK: spv.UConvert %{{.*}} : i32 to i64
+  // CHECK: spirv.UConvert %{{.*}} : i32 to i64
   %0 = arith.extui %arg0 : i32 to i64
   return %0 : i64
 }
 
 // CHECK-LABEL: @zexti3
 func.func @zexti3(%arg0 : i1) -> i32 {
-  // CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
-  // CHECK: %[[ONE:.+]] = spv.Constant 1 : i32
-  // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : i1, i32
+  // CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
+  // CHECK: %[[ONE:.+]] = spirv.Constant 1 : i32
+  // CHECK: spirv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : i1, i32
   %0 = arith.extui %arg0 : i1 to i32
   return %0 : i32
 }
 
 // CHECK-LABEL: @zexti4
 func.func @zexti4(%arg0 : vector<4xi1>) -> vector<4xi32> {
-  // CHECK: %[[ZERO:.+]] = spv.Constant dense<0> : vector<4xi32>
-  // CHECK: %[[ONE:.+]] = spv.Constant dense<1> : vector<4xi32>
-  // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xi32>
+  // CHECK: %[[ZERO:.+]] = spirv.Constant dense<0> : vector<4xi32>
+  // CHECK: %[[ONE:.+]] = spirv.Constant dense<1> : vector<4xi32>
+  // CHECK: spirv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xi32>
   %0 = arith.extui %arg0 : vector<4xi1> to vector<4xi32>
   return %0 : vector<4xi32>
 }
 
 // CHECK-LABEL: @zexti5
 func.func @zexti5(%arg0 : vector<4xi1>) -> vector<4xi64> {
-  // CHECK: %[[ZERO:.+]] = spv.Constant dense<0> : vector<4xi64>
-  // CHECK: %[[ONE:.+]] = spv.Constant dense<1> : vector<4xi64>
-  // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xi64>
+  // CHECK: %[[ZERO:.+]] = spirv.Constant dense<0> : vector<4xi64>
+  // CHECK: %[[ONE:.+]] = spirv.Constant dense<1> : vector<4xi64>
+  // CHECK: spirv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xi64>
   %0 = arith.extui %arg0 : vector<4xi1> to vector<4xi64>
   return %0 : vector<4xi64>
 }
 
 // CHECK-LABEL: @trunci1
 func.func @trunci1(%arg0 : i64) -> i16 {
-  // CHECK: spv.SConvert %{{.*}} : i64 to i16
+  // CHECK: spirv.SConvert %{{.*}} : i64 to i16
   %0 = arith.trunci %arg0 : i64 to i16
   return %0 : i16
 }
 
 // CHECK-LABEL: @trunci2
 func.func @trunci2(%arg0: i32) -> i16 {
-  // CHECK: spv.SConvert %{{.*}} : i32 to i16
+  // CHECK: spirv.SConvert %{{.*}} : i32 to i16
   %0 = arith.trunci %arg0 : i32 to i16
   return %0 : i16
 }
 
 // CHECK-LABEL: @trunc_to_i1
 func.func @trunc_to_i1(%arg0: i32) -> i1 {
-  // CHECK: %[[MASK:.*]] = spv.Constant 1 : i32
-  // CHECK: %[[MASKED_SRC:.*]] = spv.BitwiseAnd %{{.*}}, %[[MASK]] : i32
-  // CHECK: %[[IS_ONE:.*]] = spv.IEqual %[[MASKED_SRC]], %[[MASK]] : i32
-  // CHECK-DAG: %[[TRUE:.*]] = spv.Constant true
-  // CHECK-DAG: %[[FALSE:.*]] = spv.Constant false
-  // CHECK: spv.Select %[[IS_ONE]], %[[TRUE]], %[[FALSE]] : i1, i1
+  // CHECK: %[[MASK:.*]] = spirv.Constant 1 : i32
+  // CHECK: %[[MASKED_SRC:.*]] = spirv.BitwiseAnd %{{.*}}, %[[MASK]] : i32
+  // CHECK: %[[IS_ONE:.*]] = spirv.IEqual %[[MASKED_SRC]], %[[MASK]] : i32
+  // CHECK-DAG: %[[TRUE:.*]] = spirv.Constant true
+  // CHECK-DAG: %[[FALSE:.*]] = spirv.Constant false
+  // CHECK: spirv.Select %[[IS_ONE]], %[[TRUE]], %[[FALSE]] : i1, i1
   %0 = arith.trunci %arg0 : i32 to i1
   return %0 : i1
 }
 
 // CHECK-LABEL: @trunc_to_veci1
 func.func @trunc_to_veci1(%arg0: vector<4xi32>) -> vector<4xi1> {
-  // CHECK: %[[MASK:.*]] = spv.Constant dense<1> : vector<4xi32>
-  // CHECK: %[[MASKED_SRC:.*]] = spv.BitwiseAnd %{{.*}}, %[[MASK]] : vector<4xi32>
-  // CHECK: %[[IS_ONE:.*]] = spv.IEqual %[[MASKED_SRC]], %[[MASK]] : vector<4xi32>
-  // CHECK-DAG: %[[TRUE:.*]] = spv.Constant dense<true> : vector<4xi1>
-  // CHECK-DAG: %[[FALSE:.*]] = spv.Constant dense<false> : vector<4xi1>
-  // CHECK: spv.Select %[[IS_ONE]], %[[TRUE]], %[[FALSE]] : vector<4xi1>, vector<4xi1>
+  // CHECK: %[[MASK:.*]] = spirv.Constant dense<1> : vector<4xi32>
+  // CHECK: %[[MASKED_SRC:.*]] = spirv.BitwiseAnd %{{.*}}, %[[MASK]] : vector<4xi32>
+  // CHECK: %[[IS_ONE:.*]] = spirv.IEqual %[[MASKED_SRC]], %[[MASK]] : vector<4xi32>
+  // CHECK-DAG: %[[TRUE:.*]] = spirv.Constant dense<true> : vector<4xi1>
+  // CHECK-DAG: %[[FALSE:.*]] = spirv.Constant dense<false> : vector<4xi1>
+  // CHECK: spirv.Select %[[IS_ONE]], %[[TRUE]], %[[FALSE]] : vector<4xi1>, vector<4xi1>
   %0 = arith.trunci %arg0 : vector<4xi32> to vector<4xi1>
   return %0 : vector<4xi1>
 }
 
 // CHECK-LABEL: @fptosi1
 func.func @fptosi1(%arg0 : f32) -> i32 {
-  // CHECK: spv.ConvertFToS %{{.*}} : f32 to i32
+  // CHECK: spirv.ConvertFToS %{{.*}} : f32 to i32
   %0 = arith.fptosi %arg0 : f32 to i32
   return %0 : i32
 }
 
 // CHECK-LABEL: @fptosi2
 func.func @fptosi2(%arg0 : f16) -> i16 {
-  // CHECK: spv.ConvertFToS %{{.*}} : f16 to i16
+  // CHECK: spirv.ConvertFToS %{{.*}} : f16 to i16
   %0 = arith.fptosi %arg0 : f16 to i16
   return %0 : i16
 }
@@ -912,14 +912,14 @@ func.func @fptosi2(%arg0 : f16) -> i16 {
 // Checks that cast types will be adjusted when missing special capabilities for
 // certain non-32-bit scalar types.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [Float64], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Float64], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @fpext1
 // CHECK-SAME: %[[A:.*]]: f16
 func.func @fpext1(%arg0: f16) -> f64 {
   // CHECK: %[[ARG:.+]] = builtin.unrealized_conversion_cast %[[A]] : f16 to f32
-  // CHECK-NEXT: spv.FConvert %[[ARG]] : f32 to f64
+  // CHECK-NEXT: spirv.FConvert %[[ARG]] : f32 to f64
   %0 = arith.extf %arg0 : f16 to f64
   return %0: f64
 }
@@ -927,7 +927,7 @@ func.func @fpext1(%arg0: f16) -> f64 {
 // CHECK-LABEL: @fpext2
 // CHECK-SAME: %[[ARG:.*]]: f32
 func.func @fpext2(%arg0 : f32) -> f64 {
-  // CHECK-NEXT: spv.FConvert %[[ARG]] : f32 to f64
+  // CHECK-NEXT: spirv.FConvert %[[ARG]] : f32 to f64
   %0 = arith.extf %arg0 : f32 to f64
   return %0: f64
 }
@@ -939,14 +939,14 @@ func.func @fpext2(%arg0 : f32) -> f64 {
 // Checks that cast types will be adjusted when missing special capabilities for
 // certain non-32-bit scalar types.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [Float16], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Float16], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @fptrunc1
 // CHECK-SAME: %[[A:.*]]: f64
 func.func @fptrunc1(%arg0 : f64) -> f16 {
   // CHECK: %[[ARG:.+]] = builtin.unrealized_conversion_cast %[[A]] : f64 to f32
-  // CHECK-NEXT: spv.FConvert %[[ARG]] : f32 to f16
+  // CHECK-NEXT: spirv.FConvert %[[ARG]] : f32 to f16
   %0 = arith.truncf %arg0 : f64 to f16
   return %0: f16
 }
@@ -954,14 +954,14 @@ func.func @fptrunc1(%arg0 : f64) -> f16 {
 // CHECK-LABEL: @fptrunc2
 // CHECK-SAME: %[[ARG:.*]]: f32
 func.func @fptrunc2(%arg0: f32) -> f16 {
-  // CHECK-NEXT: spv.FConvert %[[ARG]] : f32 to f16
+  // CHECK-NEXT: spirv.FConvert %[[ARG]] : f32 to f16
   %0 = arith.truncf %arg0 : f32 to f16
   return %0: f16
 }
 
 // CHECK-LABEL: @sitofp
 func.func @sitofp(%arg0 : i64) -> f64 {
-  // CHECK: spv.ConvertSToF %{{.*}} : i32 to f32
+  // CHECK: spirv.ConvertSToF %{{.*}} : i32 to f32
   %0 = arith.sitofp %arg0 : i64 to f64
   return %0: f64
 }
@@ -972,32 +972,32 @@ func.func @sitofp(%arg0 : i64) -> f64 {
 
 // Check various lowerings for OpenCL.
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Int16, Kernel], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Int16, Kernel], []>, #spirv.resource_limits<>>
 } {
 
 // Check integer operation conversions.
 // CHECK-LABEL: @int32_scalar
 func.func @int32_scalar(%lhs: i32, %rhs: i32) {
-  // CHECK: spv.IAdd %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.IAdd %{{.*}}, %{{.*}}: i32
   %0 = arith.addi %lhs, %rhs: i32
-  // CHECK: spv.ISub %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.ISub %{{.*}}, %{{.*}}: i32
   %1 = arith.subi %lhs, %rhs: i32
-  // CHECK: spv.IMul %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.IMul %{{.*}}, %{{.*}}: i32
   %2 = arith.muli %lhs, %rhs: i32
-  // CHECK: spv.SDiv %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.SDiv %{{.*}}, %{{.*}}: i32
   %3 = arith.divsi %lhs, %rhs: i32
-  // CHECK: spv.UDiv %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.UDiv %{{.*}}, %{{.*}}: i32
   %4 = arith.divui %lhs, %rhs: i32
-  // CHECK: spv.UMod %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.UMod %{{.*}}, %{{.*}}: i32
   %5 = arith.remui %lhs, %rhs: i32
-  // CHECK: spv.CL.s_max %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.CL.s_max %{{.*}}, %{{.*}}: i32
   %6 = arith.maxsi %lhs, %rhs : i32
-  // CHECK: spv.CL.u_max %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.CL.u_max %{{.*}}, %{{.*}}: i32
   %7 = arith.maxui %lhs, %rhs : i32
-  // CHECK: spv.CL.s_min %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.CL.s_min %{{.*}}, %{{.*}}: i32
   %8 = arith.minsi %lhs, %rhs : i32
-  // CHECK: spv.CL.u_min %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.CL.u_min %{{.*}}, %{{.*}}: i32
   %9 = arith.minui %lhs, %rhs : i32
   return
 }
@@ -1005,15 +1005,15 @@ func.func @int32_scalar(%lhs: i32, %rhs: i32) {
 // Check float binary operation conversions.
 // CHECK-LABEL: @float32_binary_scalar
 func.func @float32_binary_scalar(%lhs: f32, %rhs: f32) {
-  // CHECK: spv.FAdd %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FAdd %{{.*}}, %{{.*}}: f32
   %0 = arith.addf %lhs, %rhs: f32
-  // CHECK: spv.FSub %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FSub %{{.*}}, %{{.*}}: f32
   %1 = arith.subf %lhs, %rhs: f32
-  // CHECK: spv.FMul %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FMul %{{.*}}, %{{.*}}: f32
   %2 = arith.mulf %lhs, %rhs: f32
-  // CHECK: spv.FDiv %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FDiv %{{.*}}, %{{.*}}: f32
   %3 = arith.divf %lhs, %rhs: f32
-  // CHECK: spv.FRem %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FRem %{{.*}}, %{{.*}}: f32
   %4 = arith.remf %lhs, %rhs: f32
   return
 }
@@ -1021,11 +1021,11 @@ func.func @float32_binary_scalar(%lhs: f32, %rhs: f32) {
 // CHECK-LABEL: @float32_minf_scalar
 // CHECK-SAME: %[[LHS:.+]]: f32, %[[RHS:.+]]: f32
 func.func @float32_minf_scalar(%arg0 : f32, %arg1 : f32) -> f32 {
-  // CHECK: %[[MIN:.+]] = spv.CL.fmin %arg0, %arg1 : f32
-  // CHECK: %[[LHS_NAN:.+]] = spv.IsNan %[[LHS]] : f32
-  // CHECK: %[[RHS_NAN:.+]] = spv.IsNan %[[RHS]] : f32
-  // CHECK: %[[SELECT1:.+]] = spv.Select %[[LHS_NAN]], %[[LHS]], %[[MIN]]
-  // CHECK: %[[SELECT2:.+]] = spv.Select %[[RHS_NAN]], %[[RHS]], %[[SELECT1]]
+  // CHECK: %[[MIN:.+]] = spirv.CL.fmin %arg0, %arg1 : f32
+  // CHECK: %[[LHS_NAN:.+]] = spirv.IsNan %[[LHS]] : f32
+  // CHECK: %[[RHS_NAN:.+]] = spirv.IsNan %[[RHS]] : f32
+  // CHECK: %[[SELECT1:.+]] = spirv.Select %[[LHS_NAN]], %[[LHS]], %[[MIN]]
+  // CHECK: %[[SELECT2:.+]] = spirv.Select %[[RHS_NAN]], %[[RHS]], %[[SELECT1]]
   %0 = arith.minf %arg0, %arg1 : f32
   // CHECK: return %[[SELECT2]]
   return %0: f32
@@ -1034,11 +1034,11 @@ func.func @float32_minf_scalar(%arg0 : f32, %arg1 : f32) -> f32 {
 // CHECK-LABEL: @float32_maxf_scalar
 // CHECK-SAME: %[[LHS:.+]]: vector<2xf32>, %[[RHS:.+]]: vector<2xf32>
 func.func @float32_maxf_scalar(%arg0 : vector<2xf32>, %arg1 : vector<2xf32>) -> vector<2xf32> {
-  // CHECK: %[[MAX:.+]] = spv.CL.fmax %arg0, %arg1 : vector<2xf32>
-  // CHECK: %[[LHS_NAN:.+]] = spv.IsNan %[[LHS]] : vector<2xf32>
-  // CHECK: %[[RHS_NAN:.+]] = spv.IsNan %[[RHS]] : vector<2xf32>
-  // CHECK: %[[SELECT1:.+]] = spv.Select %[[LHS_NAN]], %[[LHS]], %[[MAX]]
-  // CHECK: %[[SELECT2:.+]] = spv.Select %[[RHS_NAN]], %[[RHS]], %[[SELECT1]]
+  // CHECK: %[[MAX:.+]] = spirv.CL.fmax %arg0, %arg1 : vector<2xf32>
+  // CHECK: %[[LHS_NAN:.+]] = spirv.IsNan %[[LHS]] : vector<2xf32>
+  // CHECK: %[[RHS_NAN:.+]] = spirv.IsNan %[[RHS]] : vector<2xf32>
+  // CHECK: %[[SELECT1:.+]] = spirv.Select %[[LHS_NAN]], %[[LHS]], %[[MAX]]
+  // CHECK: %[[SELECT2:.+]] = spirv.Select %[[RHS_NAN]], %[[RHS]], %[[SELECT1]]
   %0 = arith.maxf %arg0, %arg1 : vector<2xf32>
   // CHECK: return %[[SELECT2]]
   return %0: vector<2xf32>
@@ -1047,12 +1047,12 @@ func.func @float32_maxf_scalar(%arg0 : vector<2xf32>, %arg1 : vector<2xf32>) ->
 // CHECK-LABEL: @scalar_srem
 // CHECK-SAME: (%[[LHS:.+]]: i32, %[[RHS:.+]]: i32)
 func.func @scalar_srem(%lhs: i32, %rhs: i32) {
-  // CHECK: %[[LABS:.+]] = spv.CL.s_abs %[[LHS]] : i32
-  // CHECK: %[[RABS:.+]] = spv.CL.s_abs %[[RHS]] : i32
-  // CHECK:  %[[ABS:.+]] = spv.UMod %[[LABS]], %[[RABS]] : i32
-  // CHECK:  %[[POS:.+]] = spv.IEqual %[[LHS]], %[[LABS]] : i32
-  // CHECK:  %[[NEG:.+]] = spv.SNegate %[[ABS]] : i32
-  // CHECK:      %{{.+}} = spv.Select %[[POS]], %[[ABS]], %[[NEG]] : i1, i32
+  // CHECK: %[[LABS:.+]] = spirv.CL.s_abs %[[LHS]] : i32
+  // CHECK: %[[RABS:.+]] = spirv.CL.s_abs %[[RHS]] : i32
+  // CHECK:  %[[ABS:.+]] = spirv.UMod %[[LABS]], %[[RABS]] : i32
+  // CHECK:  %[[POS:.+]] = spirv.IEqual %[[LHS]], %[[LABS]] : i32
+  // CHECK:  %[[NEG:.+]] = spirv.SNegate %[[ABS]] : i32
+  // CHECK:      %{{.+}} = spirv.Select %[[POS]], %[[ABS]], %[[NEG]] : i1, i32
   %0 = arith.remsi %lhs, %rhs: i32
   return
 }
@@ -1060,12 +1060,12 @@ func.func @scalar_srem(%lhs: i32, %rhs: i32) {
 // CHECK-LABEL: @vector_srem
 // CHECK-SAME: (%[[LHS:.+]]: vector<3xi16>, %[[RHS:.+]]: vector<3xi16>)
 func.func @vector_srem(%arg0: vector<3xi16>, %arg1: vector<3xi16>) {
-  // CHECK: %[[LABS:.+]] = spv.CL.s_abs %[[LHS]] : vector<3xi16>
-  // CHECK: %[[RABS:.+]] = spv.CL.s_abs %[[RHS]] : vector<3xi16>
-  // CHECK:  %[[ABS:.+]] = spv.UMod %[[LABS]], %[[RABS]] : vector<3xi16>
-  // CHECK:  %[[POS:.+]] = spv.IEqual %[[LHS]], %[[LABS]] : vector<3xi16>
-  // CHECK:  %[[NEG:.+]] = spv.SNegate %[[ABS]] : vector<3xi16>
-  // CHECK:      %{{.+}} = spv.Select %[[POS]], %[[ABS]], %[[NEG]] : vector<3xi1>, vector<3xi16>
+  // CHECK: %[[LABS:.+]] = spirv.CL.s_abs %[[LHS]] : vector<3xi16>
+  // CHECK: %[[RABS:.+]] = spirv.CL.s_abs %[[RHS]] : vector<3xi16>
+  // CHECK:  %[[ABS:.+]] = spirv.UMod %[[LABS]], %[[RABS]] : vector<3xi16>
+  // CHECK:  %[[POS:.+]] = spirv.IEqual %[[LHS]], %[[LABS]] : vector<3xi16>
+  // CHECK:  %[[NEG:.+]] = spirv.SNegate %[[ABS]] : vector<3xi16>
+  // CHECK:      %{{.+}} = spirv.Select %[[POS]], %[[ABS]], %[[NEG]] : vector<3xi1>, vector<3xi16>
   %0 = arith.remsi %arg0, %arg1: vector<3xi16>
   return
 }
@@ -1075,15 +1075,15 @@ func.func @vector_srem(%arg0: vector<3xi16>, %arg1: vector<3xi16>) {
 // -----
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Shader, Int8, Int16, Int64, Float16, Float64],
-             [SPV_KHR_storage_buffer_storage_class]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Shader, Int8, Int16, Int64, Float16, Float64],
+             [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @select
 func.func @select(%arg0 : i32, %arg1 : i32) {
   %0 = arith.cmpi sle, %arg0, %arg1 : i32
-  // CHECK: spv.Select
+  // CHECK: spirv.Select
   %1 = arith.select %0, %arg0, %arg1 : i32
   return
 }
@@ -1097,32 +1097,32 @@ func.func @select(%arg0 : i32, %arg1 : i32) {
 //===----------------------------------------------------------------------===//
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64, Shader], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64, Shader], []>, #spirv.resource_limits<>>
 } {
 
 // Check integer operation conversions.
 // CHECK-LABEL: @int32_scalar
 func.func @int32_scalar(%lhs: i32, %rhs: i32) {
-  // CHECK: spv.IAdd %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.IAdd %{{.*}}, %{{.*}}: i32
   %0 = arith.addi %lhs, %rhs: i32
-  // CHECK: spv.ISub %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.ISub %{{.*}}, %{{.*}}: i32
   %1 = arith.subi %lhs, %rhs: i32
-  // CHECK: spv.IMul %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.IMul %{{.*}}, %{{.*}}: i32
   %2 = arith.muli %lhs, %rhs: i32
-  // CHECK: spv.SDiv %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.SDiv %{{.*}}, %{{.*}}: i32
   %3 = arith.divsi %lhs, %rhs: i32
-  // CHECK: spv.UDiv %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.UDiv %{{.*}}, %{{.*}}: i32
   %4 = arith.divui %lhs, %rhs: i32
-  // CHECK: spv.UMod %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.UMod %{{.*}}, %{{.*}}: i32
   %5 = arith.remui %lhs, %rhs: i32
-  // CHECK: spv.GL.SMax %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.GL.SMax %{{.*}}, %{{.*}}: i32
   %6 = arith.maxsi %lhs, %rhs : i32
-  // CHECK: spv.GL.UMax %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.GL.UMax %{{.*}}, %{{.*}}: i32
   %7 = arith.maxui %lhs, %rhs : i32
-  // CHECK: spv.GL.SMin %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.GL.SMin %{{.*}}, %{{.*}}: i32
   %8 = arith.minsi %lhs, %rhs : i32
-  // CHECK: spv.GL.UMin %{{.*}}, %{{.*}}: i32
+  // CHECK: spirv.GL.UMin %{{.*}}, %{{.*}}: i32
   %9 = arith.minui %lhs, %rhs : i32
   return
 }
@@ -1130,12 +1130,12 @@ func.func @int32_scalar(%lhs: i32, %rhs: i32) {
 // CHECK-LABEL: @scalar_srem
 // CHECK-SAME: (%[[LHS:.+]]: i32, %[[RHS:.+]]: i32)
 func.func @scalar_srem(%lhs: i32, %rhs: i32) {
-  // CHECK: %[[LABS:.+]] = spv.GL.SAbs %[[LHS]] : i32
-  // CHECK: %[[RABS:.+]] = spv.GL.SAbs %[[RHS]] : i32
-  // CHECK:  %[[ABS:.+]] = spv.UMod %[[LABS]], %[[RABS]] : i32
-  // CHECK:  %[[POS:.+]] = spv.IEqual %[[LHS]], %[[LABS]] : i32
-  // CHECK:  %[[NEG:.+]] = spv.SNegate %[[ABS]] : i32
-  // CHECK:      %{{.+}} = spv.Select %[[POS]], %[[ABS]], %[[NEG]] : i1, i32
+  // CHECK: %[[LABS:.+]] = spirv.GL.SAbs %[[LHS]] : i32
+  // CHECK: %[[RABS:.+]] = spirv.GL.SAbs %[[RHS]] : i32
+  // CHECK:  %[[ABS:.+]] = spirv.UMod %[[LABS]], %[[RABS]] : i32
+  // CHECK:  %[[POS:.+]] = spirv.IEqual %[[LHS]], %[[LABS]] : i32
+  // CHECK:  %[[NEG:.+]] = spirv.SNegate %[[ABS]] : i32
+  // CHECK:      %{{.+}} = spirv.Select %[[POS]], %[[ABS]], %[[NEG]] : i1, i32
   %0 = arith.remsi %lhs, %rhs: i32
   return
 }
@@ -1143,7 +1143,7 @@ func.func @scalar_srem(%lhs: i32, %rhs: i32) {
 // Check float unary operation conversions.
 // CHECK-LABEL: @float32_unary_scalar
 func.func @float32_unary_scalar(%arg0: f32) {
-  // CHECK: spv.FNegate %{{.*}}: f32
+  // CHECK: spirv.FNegate %{{.*}}: f32
   %5 = arith.negf %arg0 : f32
   return
 }
@@ -1151,15 +1151,15 @@ func.func @float32_unary_scalar(%arg0: f32) {
 // Check float binary operation conversions.
 // CHECK-LABEL: @float32_binary_scalar
 func.func @float32_binary_scalar(%lhs: f32, %rhs: f32) {
-  // CHECK: spv.FAdd %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FAdd %{{.*}}, %{{.*}}: f32
   %0 = arith.addf %lhs, %rhs: f32
-  // CHECK: spv.FSub %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FSub %{{.*}}, %{{.*}}: f32
   %1 = arith.subf %lhs, %rhs: f32
-  // CHECK: spv.FMul %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FMul %{{.*}}, %{{.*}}: f32
   %2 = arith.mulf %lhs, %rhs: f32
-  // CHECK: spv.FDiv %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FDiv %{{.*}}, %{{.*}}: f32
   %3 = arith.divf %lhs, %rhs: f32
-  // CHECK: spv.FRem %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FRem %{{.*}}, %{{.*}}: f32
   %4 = arith.remf %lhs, %rhs: f32
   return
 }
@@ -1167,11 +1167,11 @@ func.func @float32_binary_scalar(%lhs: f32, %rhs: f32) {
 // CHECK-LABEL: @float32_minf_scalar
 // CHECK-SAME: %[[LHS:.+]]: f32, %[[RHS:.+]]: f32
 func.func @float32_minf_scalar(%arg0 : f32, %arg1 : f32) -> f32 {
-  // CHECK: %[[MIN:.+]] = spv.GL.FMin %arg0, %arg1 : f32
-  // CHECK: %[[LHS_NAN:.+]] = spv.IsNan %[[LHS]] : f32
-  // CHECK: %[[RHS_NAN:.+]] = spv.IsNan %[[RHS]] : f32
-  // CHECK: %[[SELECT1:.+]] = spv.Select %[[LHS_NAN]], %[[LHS]], %[[MIN]]
-  // CHECK: %[[SELECT2:.+]] = spv.Select %[[RHS_NAN]], %[[RHS]], %[[SELECT1]]
+  // CHECK: %[[MIN:.+]] = spirv.GL.FMin %arg0, %arg1 : f32
+  // CHECK: %[[LHS_NAN:.+]] = spirv.IsNan %[[LHS]] : f32
+  // CHECK: %[[RHS_NAN:.+]] = spirv.IsNan %[[RHS]] : f32
+  // CHECK: %[[SELECT1:.+]] = spirv.Select %[[LHS_NAN]], %[[LHS]], %[[MIN]]
+  // CHECK: %[[SELECT2:.+]] = spirv.Select %[[RHS_NAN]], %[[RHS]], %[[SELECT1]]
   %0 = arith.minf %arg0, %arg1 : f32
   // CHECK: return %[[SELECT2]]
   return %0: f32
@@ -1180,11 +1180,11 @@ func.func @float32_minf_scalar(%arg0 : f32, %arg1 : f32) -> f32 {
 // CHECK-LABEL: @float32_maxf_scalar
 // CHECK-SAME: %[[LHS:.+]]: vector<2xf32>, %[[RHS:.+]]: vector<2xf32>
 func.func @float32_maxf_scalar(%arg0 : vector<2xf32>, %arg1 : vector<2xf32>) -> vector<2xf32> {
-  // CHECK: %[[MAX:.+]] = spv.GL.FMax %arg0, %arg1 : vector<2xf32>
-  // CHECK: %[[LHS_NAN:.+]] = spv.IsNan %[[LHS]] : vector<2xf32>
-  // CHECK: %[[RHS_NAN:.+]] = spv.IsNan %[[RHS]] : vector<2xf32>
-  // CHECK: %[[SELECT1:.+]] = spv.Select %[[LHS_NAN]], %[[LHS]], %[[MAX]]
-  // CHECK: %[[SELECT2:.+]] = spv.Select %[[RHS_NAN]], %[[RHS]], %[[SELECT1]]
+  // CHECK: %[[MAX:.+]] = spirv.GL.FMax %arg0, %arg1 : vector<2xf32>
+  // CHECK: %[[LHS_NAN:.+]] = spirv.IsNan %[[LHS]] : vector<2xf32>
+  // CHECK: %[[RHS_NAN:.+]] = spirv.IsNan %[[RHS]] : vector<2xf32>
+  // CHECK: %[[SELECT1:.+]] = spirv.Select %[[LHS_NAN]], %[[LHS]], %[[MAX]]
+  // CHECK: %[[SELECT2:.+]] = spirv.Select %[[RHS_NAN]], %[[RHS]], %[[SELECT1]]
   %0 = arith.maxf %arg0, %arg1 : vector<2xf32>
   // CHECK: return %[[SELECT2]]
   return %0: vector<2xf32>
@@ -1193,9 +1193,9 @@ func.func @float32_maxf_scalar(%arg0 : vector<2xf32>, %arg1 : vector<2xf32>) ->
 // Check int vector types.
 // CHECK-LABEL: @int_vector234
 func.func @int_vector234(%arg0: vector<2xi8>, %arg1: vector<4xi64>) {
-  // CHECK: spv.SDiv %{{.*}}, %{{.*}}: vector<2xi8>
+  // CHECK: spirv.SDiv %{{.*}}, %{{.*}}: vector<2xi8>
   %0 = arith.divsi %arg0, %arg0: vector<2xi8>
-  // CHECK: spv.UDiv %{{.*}}, %{{.*}}: vector<4xi64>
+  // CHECK: spirv.UDiv %{{.*}}, %{{.*}}: vector<4xi64>
   %1 = arith.divui %arg1, %arg1: vector<4xi64>
   return
 }
@@ -1203,12 +1203,12 @@ func.func @int_vector234(%arg0: vector<2xi8>, %arg1: vector<4xi64>) {
 // CHECK-LABEL: @vector_srem
 // CHECK-SAME: (%[[LHS:.+]]: vector<3xi16>, %[[RHS:.+]]: vector<3xi16>)
 func.func @vector_srem(%arg0: vector<3xi16>, %arg1: vector<3xi16>) {
-  // CHECK: %[[LABS:.+]] = spv.GL.SAbs %[[LHS]] : vector<3xi16>
-  // CHECK: %[[RABS:.+]] = spv.GL.SAbs %[[RHS]] : vector<3xi16>
-  // CHECK:  %[[ABS:.+]] = spv.UMod %[[LABS]], %[[RABS]] : vector<3xi16>
-  // CHECK:  %[[POS:.+]] = spv.IEqual %[[LHS]], %[[LABS]] : vector<3xi16>
-  // CHECK:  %[[NEG:.+]] = spv.SNegate %[[ABS]] : vector<3xi16>
-  // CHECK:      %{{.+}} = spv.Select %[[POS]], %[[ABS]], %[[NEG]] : vector<3xi1>, vector<3xi16>
+  // CHECK: %[[LABS:.+]] = spirv.GL.SAbs %[[LHS]] : vector<3xi16>
+  // CHECK: %[[RABS:.+]] = spirv.GL.SAbs %[[RHS]] : vector<3xi16>
+  // CHECK:  %[[ABS:.+]] = spirv.UMod %[[LABS]], %[[RABS]] : vector<3xi16>
+  // CHECK:  %[[POS:.+]] = spirv.IEqual %[[LHS]], %[[LABS]] : vector<3xi16>
+  // CHECK:  %[[NEG:.+]] = spirv.SNegate %[[ABS]] : vector<3xi16>
+  // CHECK:      %{{.+}} = spirv.Select %[[POS]], %[[ABS]], %[[NEG]] : vector<3xi1>, vector<3xi16>
   %0 = arith.remsi %arg0, %arg1: vector<3xi16>
   return
 }
@@ -1216,16 +1216,16 @@ func.func @vector_srem(%arg0: vector<3xi16>, %arg1: vector<3xi16>) {
 // Check float vector types.
 // CHECK-LABEL: @float_vector234
 func.func @float_vector234(%arg0: vector<2xf16>, %arg1: vector<3xf64>) {
-  // CHECK: spv.FAdd %{{.*}}, %{{.*}}: vector<2xf16>
+  // CHECK: spirv.FAdd %{{.*}}, %{{.*}}: vector<2xf16>
   %0 = arith.addf %arg0, %arg0: vector<2xf16>
-  // CHECK: spv.FMul %{{.*}}, %{{.*}}: vector<3xf64>
+  // CHECK: spirv.FMul %{{.*}}, %{{.*}}: vector<3xf64>
   %1 = arith.mulf %arg1, %arg1: vector<3xf64>
   return
 }
 
 // CHECK-LABEL: @one_elem_vector
 func.func @one_elem_vector(%arg0: vector<1xi32>) {
-  // CHECK: spv.IAdd %{{.+}}, %{{.+}}: i32
+  // CHECK: spirv.IAdd %{{.+}}, %{{.+}}: i32
   %0 = arith.addi %arg0, %arg0: vector<1xi32>
   return
 }
@@ -1250,23 +1250,23 @@ func.func @unsupported_2x2elem_vector(%arg0: vector<2x2xi32>) {
 
 // Check that types are converted to 32-bit when no special capabilities.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @int_vector23
 func.func @int_vector23(%arg0: vector<2xi8>, %arg1: vector<3xi16>) {
-  // CHECK: spv.SDiv %{{.*}}, %{{.*}}: vector<2xi32>
+  // CHECK: spirv.SDiv %{{.*}}, %{{.*}}: vector<2xi32>
   %0 = arith.divsi %arg0, %arg0: vector<2xi8>
-  // CHECK: spv.SDiv %{{.*}}, %{{.*}}: vector<3xi32>
+  // CHECK: spirv.SDiv %{{.*}}, %{{.*}}: vector<3xi32>
   %1 = arith.divsi %arg1, %arg1: vector<3xi16>
   return
 }
 
 // CHECK-LABEL: @float_scalar
 func.func @float_scalar(%arg0: f16, %arg1: f64) {
-  // CHECK: spv.FAdd %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FAdd %{{.*}}, %{{.*}}: f32
   %0 = arith.addf %arg0, %arg0: f16
-  // CHECK: spv.FMul %{{.*}}, %{{.*}}: f32
+  // CHECK: spirv.FMul %{{.*}}, %{{.*}}: f32
   %1 = arith.mulf %arg1, %arg1: f64
   return
 }
@@ -1278,7 +1278,7 @@ func.func @float_scalar(%arg0: f16, %arg1: f64) {
 // Check that types are converted to 32-bit when no special capabilities that
 // are not supported.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 func.func @int_vector4_invalid(%arg0: vector<4xi64>) {
@@ -1296,71 +1296,71 @@ func.func @int_vector4_invalid(%arg0: vector<4xi64>) {
 //===----------------------------------------------------------------------===//
 
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @bitwise_scalar
 func.func @bitwise_scalar(%arg0 : i32, %arg1 : i32) {
-  // CHECK: spv.BitwiseAnd
+  // CHECK: spirv.BitwiseAnd
   %0 = arith.andi %arg0, %arg1 : i32
-  // CHECK: spv.BitwiseOr
+  // CHECK: spirv.BitwiseOr
   %1 = arith.ori %arg0, %arg1 : i32
-  // CHECK: spv.BitwiseXor
+  // CHECK: spirv.BitwiseXor
   %2 = arith.xori %arg0, %arg1 : i32
   return
 }
 
 // CHECK-LABEL: @bitwise_vector
 func.func @bitwise_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>) {
-  // CHECK: spv.BitwiseAnd
+  // CHECK: spirv.BitwiseAnd
   %0 = arith.andi %arg0, %arg1 : vector<4xi32>
-  // CHECK: spv.BitwiseOr
+  // CHECK: spirv.BitwiseOr
   %1 = arith.ori %arg0, %arg1 : vector<4xi32>
-  // CHECK: spv.BitwiseXor
+  // CHECK: spirv.BitwiseXor
   %2 = arith.xori %arg0, %arg1 : vector<4xi32>
   return
 }
 
 // CHECK-LABEL: @logical_scalar
 func.func @logical_scalar(%arg0 : i1, %arg1 : i1) {
-  // CHECK: spv.LogicalAnd
+  // CHECK: spirv.LogicalAnd
   %0 = arith.andi %arg0, %arg1 : i1
-  // CHECK: spv.LogicalOr
+  // CHECK: spirv.LogicalOr
   %1 = arith.ori %arg0, %arg1 : i1
-  // CHECK: spv.LogicalNotEqual
+  // CHECK: spirv.LogicalNotEqual
   %2 = arith.xori %arg0, %arg1 : i1
   return
 }
 
 // CHECK-LABEL: @logical_vector
 func.func @logical_vector(%arg0 : vector<4xi1>, %arg1 : vector<4xi1>) {
-  // CHECK: spv.LogicalAnd
+  // CHECK: spirv.LogicalAnd
   %0 = arith.andi %arg0, %arg1 : vector<4xi1>
-  // CHECK: spv.LogicalOr
+  // CHECK: spirv.LogicalOr
   %1 = arith.ori %arg0, %arg1 : vector<4xi1>
-  // CHECK: spv.LogicalNotEqual
+  // CHECK: spirv.LogicalNotEqual
   %2 = arith.xori %arg0, %arg1 : vector<4xi1>
   return
 }
 
 // CHECK-LABEL: @shift_scalar
 func.func @shift_scalar(%arg0 : i32, %arg1 : i32) {
-  // CHECK: spv.ShiftLeftLogical
+  // CHECK: spirv.ShiftLeftLogical
   %0 = arith.shli %arg0, %arg1 : i32
-  // CHECK: spv.ShiftRightArithmetic
+  // CHECK: spirv.ShiftRightArithmetic
   %1 = arith.shrsi %arg0, %arg1 : i32
-  // CHECK: spv.ShiftRightLogical
+  // CHECK: spirv.ShiftRightLogical
   %2 = arith.shrui %arg0, %arg1 : i32
   return
 }
 
 // CHECK-LABEL: @shift_vector
 func.func @shift_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>) {
-  // CHECK: spv.ShiftLeftLogical
+  // CHECK: spirv.ShiftLeftLogical
   %0 = arith.shli %arg0, %arg1 : vector<4xi32>
-  // CHECK: spv.ShiftRightArithmetic
+  // CHECK: spirv.ShiftRightArithmetic
   %1 = arith.shrsi %arg0, %arg1 : vector<4xi32>
-  // CHECK: spv.ShiftRightLogical
+  // CHECK: spirv.ShiftRightLogical
   %2 = arith.shrui %arg0, %arg1 : vector<4xi32>
   return
 }
@@ -1374,43 +1374,43 @@ func.func @shift_vector(%arg0 : vector<4xi32>, %arg1 : vector<4xi32>) {
 //===----------------------------------------------------------------------===//
 
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @cmpf
 func.func @cmpf(%arg0 : f32, %arg1 : f32) {
-  // CHECK: spv.FOrdEqual
+  // CHECK: spirv.FOrdEqual
   %1 = arith.cmpf oeq, %arg0, %arg1 : f32
-  // CHECK: spv.FOrdGreaterThan
+  // CHECK: spirv.FOrdGreaterThan
   %2 = arith.cmpf ogt, %arg0, %arg1 : f32
-  // CHECK: spv.FOrdGreaterThanEqual
+  // CHECK: spirv.FOrdGreaterThanEqual
   %3 = arith.cmpf oge, %arg0, %arg1 : f32
-  // CHECK: spv.FOrdLessThan
+  // CHECK: spirv.FOrdLessThan
   %4 = arith.cmpf olt, %arg0, %arg1 : f32
-  // CHECK: spv.FOrdLessThanEqual
+  // CHECK: spirv.FOrdLessThanEqual
   %5 = arith.cmpf ole, %arg0, %arg1 : f32
-  // CHECK: spv.FOrdNotEqual
+  // CHECK: spirv.FOrdNotEqual
   %6 = arith.cmpf one, %arg0, %arg1 : f32
-  // CHECK: spv.FUnordEqual
+  // CHECK: spirv.FUnordEqual
   %7 = arith.cmpf ueq, %arg0, %arg1 : f32
-  // CHECK: spv.FUnordGreaterThan
+  // CHECK: spirv.FUnordGreaterThan
   %8 = arith.cmpf ugt, %arg0, %arg1 : f32
-  // CHECK: spv.FUnordGreaterThanEqual
+  // CHECK: spirv.FUnordGreaterThanEqual
   %9 = arith.cmpf uge, %arg0, %arg1 : f32
-  // CHECK: spv.FUnordLessThan
+  // CHECK: spirv.FUnordLessThan
   %10 = arith.cmpf ult, %arg0, %arg1 : f32
   // CHECK: FUnordLessThanEqual
   %11 = arith.cmpf ule, %arg0, %arg1 : f32
-  // CHECK: spv.FUnordNotEqual
+  // CHECK: spirv.FUnordNotEqual
   %12 = arith.cmpf une, %arg0, %arg1 : f32
   return
 }
 
 // CHECK-LABEL: @vec1cmpf
 func.func @vec1cmpf(%arg0 : vector<1xf32>, %arg1 : vector<1xf32>) {
-  // CHECK: spv.FOrdGreaterThan
+  // CHECK: spirv.FOrdGreaterThan
   %0 = arith.cmpf ogt, %arg0, %arg1 : vector<1xf32>
-  // CHECK: spv.FUnordLessThan
+  // CHECK: spirv.FUnordLessThan
   %1 = arith.cmpf ult, %arg0, %arg1 : vector<1xf32>
   return
 }
@@ -1419,16 +1419,16 @@ func.func @vec1cmpf(%arg0 : vector<1xf32>, %arg1 : vector<1xf32>) {
 
 // -----
 
-// With Kernel capability, we can convert NaN check to spv.Ordered/spv.Unordered.
+// With Kernel capability, we can convert NaN check to spirv.Ordered/spirv.Unordered.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [Kernel], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Kernel], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @cmpf
 func.func @cmpf(%arg0 : f32, %arg1 : f32) {
-  // CHECK: spv.Ordered
+  // CHECK: spirv.Ordered
   %0 = arith.cmpf ord, %arg0, %arg1 : f32
-  // CHECK: spv.Unordered
+  // CHECK: spirv.Unordered
   %1 = arith.cmpf uno, %arg0, %arg1 : f32
   return
 }
@@ -1437,23 +1437,23 @@ func.func @cmpf(%arg0 : f32, %arg1 : f32) {
 
 // -----
 
-// Without Kernel capability, we need to convert NaN check to spv.IsNan.
+// Without Kernel capability, we need to convert NaN check to spirv.IsNan.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @cmpf
 // CHECK-SAME: %[[LHS:.+]]: f32, %[[RHS:.+]]: f32
 func.func @cmpf(%arg0 : f32, %arg1 : f32) {
-  // CHECK:      %[[LHS_NAN:.+]] = spv.IsNan %[[LHS]] : f32
-  // CHECK-NEXT: %[[RHS_NAN:.+]] = spv.IsNan %[[RHS]] : f32
-  // CHECK-NEXT: %[[OR:.+]] = spv.LogicalOr %[[LHS_NAN]], %[[RHS_NAN]] : i1
-  // CHECK-NEXT: %{{.+}} = spv.LogicalNot %[[OR]] : i1
+  // CHECK:      %[[LHS_NAN:.+]] = spirv.IsNan %[[LHS]] : f32
+  // CHECK-NEXT: %[[RHS_NAN:.+]] = spirv.IsNan %[[RHS]] : f32
+  // CHECK-NEXT: %[[OR:.+]] = spirv.LogicalOr %[[LHS_NAN]], %[[RHS_NAN]] : i1
+  // CHECK-NEXT: %{{.+}} = spirv.LogicalNot %[[OR]] : i1
   %0 = arith.cmpf ord, %arg0, %arg1 : f32
 
-  // CHECK-NEXT: %[[LHS_NAN:.+]] = spv.IsNan %[[LHS]] : f32
-  // CHECK-NEXT: %[[RHS_NAN:.+]] = spv.IsNan %[[RHS]] : f32
-  // CHECK-NEXT: %{{.+}} = spv.LogicalOr %[[LHS_NAN]], %[[RHS_NAN]] : i1
+  // CHECK-NEXT: %[[LHS_NAN:.+]] = spirv.IsNan %[[LHS]] : f32
+  // CHECK-NEXT: %[[RHS_NAN:.+]] = spirv.IsNan %[[RHS]] : f32
+  // CHECK-NEXT: %{{.+}} = spirv.LogicalOr %[[LHS_NAN]], %[[RHS_NAN]] : i1
   %1 = arith.cmpf uno, %arg0, %arg1 : f32
   return
 }
@@ -1467,48 +1467,48 @@ func.func @cmpf(%arg0 : f32, %arg1 : f32) {
 //===----------------------------------------------------------------------===//
 
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @cmpi
 func.func @cmpi(%arg0 : i32, %arg1 : i32) {
-  // CHECK: spv.IEqual
+  // CHECK: spirv.IEqual
   %0 = arith.cmpi eq, %arg0, %arg1 : i32
-  // CHECK: spv.INotEqual
+  // CHECK: spirv.INotEqual
   %1 = arith.cmpi ne, %arg0, %arg1 : i32
-  // CHECK: spv.SLessThan
+  // CHECK: spirv.SLessThan
   %2 = arith.cmpi slt, %arg0, %arg1 : i32
-  // CHECK: spv.SLessThanEqual
+  // CHECK: spirv.SLessThanEqual
   %3 = arith.cmpi sle, %arg0, %arg1 : i32
-  // CHECK: spv.SGreaterThan
+  // CHECK: spirv.SGreaterThan
   %4 = arith.cmpi sgt, %arg0, %arg1 : i32
-  // CHECK: spv.SGreaterThanEqual
+  // CHECK: spirv.SGreaterThanEqual
   %5 = arith.cmpi sge, %arg0, %arg1 : i32
-  // CHECK: spv.ULessThan
+  // CHECK: spirv.ULessThan
   %6 = arith.cmpi ult, %arg0, %arg1 : i32
-  // CHECK: spv.ULessThanEqual
+  // CHECK: spirv.ULessThanEqual
   %7 = arith.cmpi ule, %arg0, %arg1 : i32
-  // CHECK: spv.UGreaterThan
+  // CHECK: spirv.UGreaterThan
   %8 = arith.cmpi ugt, %arg0, %arg1 : i32
-  // CHECK: spv.UGreaterThanEqual
+  // CHECK: spirv.UGreaterThanEqual
   %9 = arith.cmpi uge, %arg0, %arg1 : i32
   return
 }
 
 // CHECK-LABEL: @boolcmpi
 func.func @boolcmpi(%arg0 : i1, %arg1 : i1) {
-  // CHECK: spv.LogicalEqual
+  // CHECK: spirv.LogicalEqual
   %0 = arith.cmpi eq, %arg0, %arg1 : i1
-  // CHECK: spv.LogicalNotEqual
+  // CHECK: spirv.LogicalNotEqual
   %1 = arith.cmpi ne, %arg0, %arg1 : i1
   return
 }
 
 // CHECK-LABEL: @vecboolcmpi
 func.func @vecboolcmpi(%arg0 : vector<4xi1>, %arg1 : vector<4xi1>) {
-  // CHECK: spv.LogicalEqual
+  // CHECK: spirv.LogicalEqual
   %0 = arith.cmpi eq, %arg0, %arg1 : vector<4xi1>
-  // CHECK: spv.LogicalNotEqual
+  // CHECK: spirv.LogicalNotEqual
   %1 = arith.cmpi ne, %arg0, %arg1 : vector<4xi1>
   return
 }
@@ -1522,59 +1522,59 @@ func.func @vecboolcmpi(%arg0 : vector<4xi1>, %arg1 : vector<4xi1>) {
 //===----------------------------------------------------------------------===//
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @constant
 func.func @constant() {
-  // CHECK: spv.Constant true
+  // CHECK: spirv.Constant true
   %0 = arith.constant true
-  // CHECK: spv.Constant 42 : i32
+  // CHECK: spirv.Constant 42 : i32
   %1 = arith.constant 42 : i32
-  // CHECK: spv.Constant 5.000000e-01 : f32
+  // CHECK: spirv.Constant 5.000000e-01 : f32
   %2 = arith.constant 0.5 : f32
-  // CHECK: spv.Constant dense<[2, 3]> : vector<2xi32>
+  // CHECK: spirv.Constant dense<[2, 3]> : vector<2xi32>
   %3 = arith.constant dense<[2, 3]> : vector<2xi32>
-  // CHECK: spv.Constant 1 : i32
+  // CHECK: spirv.Constant 1 : i32
   %4 = arith.constant 1 : index
-  // CHECK: spv.Constant dense<1> : tensor<6xi32> : !spv.array<6 x i32>
+  // CHECK: spirv.Constant dense<1> : tensor<6xi32> : !spirv.array<6 x i32>
   %5 = arith.constant dense<1> : tensor<2x3xi32>
-  // CHECK: spv.Constant dense<1.000000e+00> : tensor<6xf32> : !spv.array<6 x f32>
+  // CHECK: spirv.Constant dense<1.000000e+00> : tensor<6xf32> : !spirv.array<6 x f32>
   %6 = arith.constant dense<1.0> : tensor<2x3xf32>
-  // CHECK: spv.Constant dense<{{\[}}1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf32> : !spv.array<6 x f32>
+  // CHECK: spirv.Constant dense<{{\[}}1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf32> : !spirv.array<6 x f32>
   %7 = arith.constant dense<[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]> : tensor<2x3xf32>
-  // CHECK: spv.Constant dense<{{\[}}1, 2, 3, 4, 5, 6]> : tensor<6xi32> : !spv.array<6 x i32>
+  // CHECK: spirv.Constant dense<{{\[}}1, 2, 3, 4, 5, 6]> : tensor<6xi32> : !spirv.array<6 x i32>
   %8 = arith.constant dense<[[1, 2, 3], [4, 5, 6]]> : tensor<2x3xi32>
-  // CHECK: spv.Constant dense<{{\[}}1, 2, 3, 4, 5, 6]> : tensor<6xi32> : !spv.array<6 x i32>
+  // CHECK: spirv.Constant dense<{{\[}}1, 2, 3, 4, 5, 6]> : tensor<6xi32> : !spirv.array<6 x i32>
   %9 = arith.constant dense<[[1, 2], [3, 4], [5, 6]]> : tensor<3x2xi32>
-  // CHECK: spv.Constant dense<{{\[}}1, 2, 3, 4, 5, 6]> : tensor<6xi32> : !spv.array<6 x i32>
+  // CHECK: spirv.Constant dense<{{\[}}1, 2, 3, 4, 5, 6]> : tensor<6xi32> : !spirv.array<6 x i32>
   %10 = arith.constant dense<[1, 2, 3, 4, 5, 6]> : tensor<6xi32>
   return
 }
 
 // CHECK-LABEL: @constant_16bit
 func.func @constant_16bit() {
-  // CHECK: spv.Constant 4 : i16
+  // CHECK: spirv.Constant 4 : i16
   %0 = arith.constant 4 : i16
-  // CHECK: spv.Constant 5.000000e+00 : f16
+  // CHECK: spirv.Constant 5.000000e+00 : f16
   %1 = arith.constant 5.0 : f16
-  // CHECK: spv.Constant dense<[2, 3]> : vector<2xi16>
+  // CHECK: spirv.Constant dense<[2, 3]> : vector<2xi16>
   %2 = arith.constant dense<[2, 3]> : vector<2xi16>
-  // CHECK: spv.Constant dense<4.000000e+00> : tensor<5xf16> : !spv.array<5 x f16>
+  // CHECK: spirv.Constant dense<4.000000e+00> : tensor<5xf16> : !spirv.array<5 x f16>
   %3 = arith.constant dense<4.0> : tensor<5xf16>
   return
 }
 
 // CHECK-LABEL: @constant_64bit
 func.func @constant_64bit() {
-  // CHECK: spv.Constant 4 : i64
+  // CHECK: spirv.Constant 4 : i64
   %0 = arith.constant 4 : i64
-  // CHECK: spv.Constant 5.000000e+00 : f64
+  // CHECK: spirv.Constant 5.000000e+00 : f64
   %1 = arith.constant 5.0 : f64
-  // CHECK: spv.Constant dense<[2, 3]> : vector<2xi64>
+  // CHECK: spirv.Constant dense<[2, 3]> : vector<2xi64>
   %2 = arith.constant dense<[2, 3]> : vector<2xi64>
-  // CHECK: spv.Constant dense<4.000000e+00> : tensor<5xf64> : !spv.array<5 x f64>
+  // CHECK: spirv.Constant dense<4.000000e+00> : tensor<5xf64> : !spirv.array<5 x f64>
   %3 = arith.constant dense<4.0> : tensor<5xf64>
   return
 }
@@ -1585,63 +1585,63 @@ func.func @constant_64bit() {
 
 // Check that constants are converted to 32-bit when no special capability.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @constant_16bit
 func.func @constant_16bit() {
-  // CHECK: spv.Constant 4 : i32
+  // CHECK: spirv.Constant 4 : i32
   %0 = arith.constant 4 : i16
-  // CHECK: spv.Constant 5.000000e+00 : f32
+  // CHECK: spirv.Constant 5.000000e+00 : f32
   %1 = arith.constant 5.0 : f16
-  // CHECK: spv.Constant dense<[2, 3]> : vector<2xi32>
+  // CHECK: spirv.Constant dense<[2, 3]> : vector<2xi32>
   %2 = arith.constant dense<[2, 3]> : vector<2xi16>
-  // CHECK: spv.Constant dense<4.000000e+00> : tensor<5xf32> : !spv.array<5 x f32>
+  // CHECK: spirv.Constant dense<4.000000e+00> : tensor<5xf32> : !spirv.array<5 x f32>
   %3 = arith.constant dense<4.0> : tensor<5xf16>
-  // CHECK: spv.Constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00]> : tensor<4xf32> : !spv.array<4 x f32>
+  // CHECK: spirv.Constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00]> : tensor<4xf32> : !spirv.array<4 x f32>
   %4 = arith.constant dense<[[1.0, 2.0], [3.0, 4.0]]> : tensor<2x2xf16>
   return
 }
 
 // CHECK-LABEL: @constant_64bit
 func.func @constant_64bit() {
-  // CHECK: spv.Constant 4 : i32
+  // CHECK: spirv.Constant 4 : i32
   %0 = arith.constant 4 : i64
-  // CHECK: spv.Constant 5.000000e+00 : f32
+  // CHECK: spirv.Constant 5.000000e+00 : f32
   %1 = arith.constant 5.0 : f64
-  // CHECK: spv.Constant dense<[2, 3]> : vector<2xi32>
+  // CHECK: spirv.Constant dense<[2, 3]> : vector<2xi32>
   %2 = arith.constant dense<[2, 3]> : vector<2xi64>
-  // CHECK: spv.Constant dense<4.000000e+00> : tensor<5xf32> : !spv.array<5 x f32>
+  // CHECK: spirv.Constant dense<4.000000e+00> : tensor<5xf32> : !spirv.array<5 x f32>
   %3 = arith.constant dense<4.0> : tensor<5xf64>
-  // CHECK: spv.Constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00]> : tensor<4xf32> : !spv.array<4 x f32>
+  // CHECK: spirv.Constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00]> : tensor<4xf32> : !spirv.array<4 x f32>
   %4 = arith.constant dense<[[1.0, 2.0], [3.0, 4.0]]> : tensor<2x2xf16>
   return
 }
 
 // CHECK-LABEL: @corner_cases
 func.func @corner_cases() {
-  // CHECK: %{{.*}} = spv.Constant -1 : i32
+  // CHECK: %{{.*}} = spirv.Constant -1 : i32
   %0 = arith.constant 4294967295  : i64 // 2^32 - 1
-  // CHECK: %{{.*}} = spv.Constant 2147483647 : i32
+  // CHECK: %{{.*}} = spirv.Constant 2147483647 : i32
   %1 = arith.constant 2147483647  : i64 // 2^31 - 1
-  // CHECK: %{{.*}} = spv.Constant -2147483648 : i32
+  // CHECK: %{{.*}} = spirv.Constant -2147483648 : i32
   %2 = arith.constant 2147483648  : i64 // 2^31
-  // CHECK: %{{.*}} = spv.Constant -2147483648 : i32
+  // CHECK: %{{.*}} = spirv.Constant -2147483648 : i32
   %3 = arith.constant -2147483648 : i64 // -2^31
 
-  // CHECK: %{{.*}} = spv.Constant -1 : i32
+  // CHECK: %{{.*}} = spirv.Constant -1 : i32
   %5 = arith.constant -1 : i64
-  // CHECK: %{{.*}} = spv.Constant -2 : i32
+  // CHECK: %{{.*}} = spirv.Constant -2 : i32
   %6 = arith.constant -2 : i64
-  // CHECK: %{{.*}} = spv.Constant -1 : i32
+  // CHECK: %{{.*}} = spirv.Constant -1 : i32
   %7 = arith.constant -1 : index
-  // CHECK: %{{.*}} = spv.Constant -2 : i32
+  // CHECK: %{{.*}} = spirv.Constant -2 : i32
   %8 = arith.constant -2 : index
 
 
-  // CHECK: spv.Constant false
+  // CHECK: spirv.Constant false
   %9 = arith.constant false
-  // CHECK: spv.Constant true
+  // CHECK: spirv.Constant true
   %10 = arith.constant true
 
   return
@@ -1667,235 +1667,235 @@ func.func @unsupported_cases() {
 //===----------------------------------------------------------------------===//
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: index_cast1
 func.func @index_cast1(%arg0: i16) {
-  // CHECK: spv.SConvert %{{.+}} : i16 to i32
+  // CHECK: spirv.SConvert %{{.+}} : i16 to i32
   %0 = arith.index_cast %arg0 : i16 to index
   return
 }
 
 // CHECK-LABEL: index_cast2
 func.func @index_cast2(%arg0: index) {
-  // CHECK: spv.SConvert %{{.+}} : i32 to i16
+  // CHECK: spirv.SConvert %{{.+}} : i32 to i16
   %0 = arith.index_cast %arg0 : index to i16
   return
 }
 
 // CHECK-LABEL: index_cast3
 func.func @index_cast3(%arg0: i32) {
-  // CHECK-NOT: spv.SConvert
+  // CHECK-NOT: spirv.SConvert
   %0 = arith.index_cast %arg0 : i32 to index
   return
 }
 
 // CHECK-LABEL: index_cast4
 func.func @index_cast4(%arg0: index) {
-  // CHECK-NOT: spv.SConvert
+  // CHECK-NOT: spirv.SConvert
   %0 = arith.index_cast %arg0 : index to i32
   return
 }
 
 // CHECK-LABEL: @fpext1
 func.func @fpext1(%arg0: f16) -> f64 {
-  // CHECK: spv.FConvert %{{.*}} : f16 to f64
+  // CHECK: spirv.FConvert %{{.*}} : f16 to f64
   %0 = arith.extf %arg0 : f16 to f64
   return %0 : f64
 }
 
 // CHECK-LABEL: @fpext2
 func.func @fpext2(%arg0 : f32) -> f64 {
-  // CHECK: spv.FConvert %{{.*}} : f32 to f64
+  // CHECK: spirv.FConvert %{{.*}} : f32 to f64
   %0 = arith.extf %arg0 : f32 to f64
   return %0 : f64
 }
 
 // CHECK-LABEL: @fptrunc1
 func.func @fptrunc1(%arg0 : f64) -> f16 {
-  // CHECK: spv.FConvert %{{.*}} : f64 to f16
+  // CHECK: spirv.FConvert %{{.*}} : f64 to f16
   %0 = arith.truncf %arg0 : f64 to f16
   return %0 : f16
 }
 
 // CHECK-LABEL: @fptrunc2
 func.func @fptrunc2(%arg0: f32) -> f16 {
-  // CHECK: spv.FConvert %{{.*}} : f32 to f16
+  // CHECK: spirv.FConvert %{{.*}} : f32 to f16
   %0 = arith.truncf %arg0 : f32 to f16
   return %0 : f16
 }
 
 // CHECK-LABEL: @sitofp1
 func.func @sitofp1(%arg0 : i32) -> f32 {
-  // CHECK: spv.ConvertSToF %{{.*}} : i32 to f32
+  // CHECK: spirv.ConvertSToF %{{.*}} : i32 to f32
   %0 = arith.sitofp %arg0 : i32 to f32
   return %0 : f32
 }
 
 // CHECK-LABEL: @sitofp2
 func.func @sitofp2(%arg0 : i64) -> f64 {
-  // CHECK: spv.ConvertSToF %{{.*}} : i64 to f64
+  // CHECK: spirv.ConvertSToF %{{.*}} : i64 to f64
   %0 = arith.sitofp %arg0 : i64 to f64
   return %0 : f64
 }
 
 // CHECK-LABEL: @uitofp_i16_f32
 func.func @uitofp_i16_f32(%arg0: i16) -> f32 {
-  // CHECK: spv.ConvertUToF %{{.*}} : i16 to f32
+  // CHECK: spirv.ConvertUToF %{{.*}} : i16 to f32
   %0 = arith.uitofp %arg0 : i16 to f32
   return %0 : f32
 }
 
 // CHECK-LABEL: @uitofp_i32_f32
 func.func @uitofp_i32_f32(%arg0 : i32) -> f32 {
-  // CHECK: spv.ConvertUToF %{{.*}} : i32 to f32
+  // CHECK: spirv.ConvertUToF %{{.*}} : i32 to f32
   %0 = arith.uitofp %arg0 : i32 to f32
   return %0 : f32
 }
 
 // CHECK-LABEL: @uitofp_i1_f32
 func.func @uitofp_i1_f32(%arg0 : i1) -> f32 {
-  // CHECK: %[[ZERO:.+]] = spv.Constant 0.000000e+00 : f32
-  // CHECK: %[[ONE:.+]] = spv.Constant 1.000000e+00 : f32
-  // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : i1, f32
+  // CHECK: %[[ZERO:.+]] = spirv.Constant 0.000000e+00 : f32
+  // CHECK: %[[ONE:.+]] = spirv.Constant 1.000000e+00 : f32
+  // CHECK: spirv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : i1, f32
   %0 = arith.uitofp %arg0 : i1 to f32
   return %0 : f32
 }
 
 // CHECK-LABEL: @uitofp_i1_f64
 func.func @uitofp_i1_f64(%arg0 : i1) -> f64 {
-  // CHECK: %[[ZERO:.+]] = spv.Constant 0.000000e+00 : f64
-  // CHECK: %[[ONE:.+]] = spv.Constant 1.000000e+00 : f64
-  // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : i1, f64
+  // CHECK: %[[ZERO:.+]] = spirv.Constant 0.000000e+00 : f64
+  // CHECK: %[[ONE:.+]] = spirv.Constant 1.000000e+00 : f64
+  // CHECK: spirv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : i1, f64
   %0 = arith.uitofp %arg0 : i1 to f64
   return %0 : f64
 }
 
 // CHECK-LABEL: @uitofp_vec_i1_f32
 func.func @uitofp_vec_i1_f32(%arg0 : vector<4xi1>) -> vector<4xf32> {
-  // CHECK: %[[ZERO:.+]] = spv.Constant dense<0.000000e+00> : vector<4xf32>
-  // CHECK: %[[ONE:.+]] = spv.Constant dense<1.000000e+00> : vector<4xf32>
-  // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xf32>
+  // CHECK: %[[ZERO:.+]] = spirv.Constant dense<0.000000e+00> : vector<4xf32>
+  // CHECK: %[[ONE:.+]] = spirv.Constant dense<1.000000e+00> : vector<4xf32>
+  // CHECK: spirv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xf32>
   %0 = arith.uitofp %arg0 : vector<4xi1> to vector<4xf32>
   return %0 : vector<4xf32>
 }
 
 // CHECK-LABEL: @uitofp_vec_i1_f64
-spv.func @uitofp_vec_i1_f64(%arg0: vector<4xi1>) -> vector<4xf64> "None" {
-  // CHECK: %[[ZERO:.+]] = spv.Constant dense<0.000000e+00> : vector<4xf64>
-  // CHECK: %[[ONE:.+]] = spv.Constant dense<1.000000e+00> : vector<4xf64>
-  // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xf64>
-  %0 = spv.Constant dense<0.000000e+00> : vector<4xf64>
-  %1 = spv.Constant dense<1.000000e+00> : vector<4xf64>
-  %2 = spv.Select %arg0, %1, %0 : vector<4xi1>, vector<4xf64>
-  spv.ReturnValue %2 : vector<4xf64>
+spirv.func @uitofp_vec_i1_f64(%arg0: vector<4xi1>) -> vector<4xf64> "None" {
+  // CHECK: %[[ZERO:.+]] = spirv.Constant dense<0.000000e+00> : vector<4xf64>
+  // CHECK: %[[ONE:.+]] = spirv.Constant dense<1.000000e+00> : vector<4xf64>
+  // CHECK: spirv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xf64>
+  %0 = spirv.Constant dense<0.000000e+00> : vector<4xf64>
+  %1 = spirv.Constant dense<1.000000e+00> : vector<4xf64>
+  %2 = spirv.Select %arg0, %1, %0 : vector<4xi1>, vector<4xf64>
+  spirv.ReturnValue %2 : vector<4xf64>
 }
 
 // CHECK-LABEL: @sexti1
 func.func @sexti1(%arg0: i16) -> i64 {
-  // CHECK: spv.SConvert %{{.*}} : i16 to i64
+  // CHECK: spirv.SConvert %{{.*}} : i16 to i64
   %0 = arith.extsi %arg0 : i16 to i64
   return %0 : i64
 }
 
 // CHECK-LABEL: @sexti2
 func.func @sexti2(%arg0 : i32) -> i64 {
-  // CHECK: spv.SConvert %{{.*}} : i32 to i64
+  // CHECK: spirv.SConvert %{{.*}} : i32 to i64
   %0 = arith.extsi %arg0 : i32 to i64
   return %0 : i64
 }
 
 // CHECK-LABEL: @zexti1
 func.func @zexti1(%arg0: i16) -> i64 {
-  // CHECK: spv.UConvert %{{.*}} : i16 to i64
+  // CHECK: spirv.UConvert %{{.*}} : i16 to i64
   %0 = arith.extui %arg0 : i16 to i64
   return %0 : i64
 }
 
 // CHECK-LABEL: @zexti2
 func.func @zexti2(%arg0 : i32) -> i64 {
-  // CHECK: spv.UConvert %{{.*}} : i32 to i64
+  // CHECK: spirv.UConvert %{{.*}} : i32 to i64
   %0 = arith.extui %arg0 : i32 to i64
   return %0 : i64
 }
 
 // CHECK-LABEL: @zexti3
 func.func @zexti3(%arg0 : i1) -> i32 {
-  // CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
-  // CHECK: %[[ONE:.+]] = spv.Constant 1 : i32
-  // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : i1, i32
+  // CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
+  // CHECK: %[[ONE:.+]] = spirv.Constant 1 : i32
+  // CHECK: spirv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : i1, i32
   %0 = arith.extui %arg0 : i1 to i32
   return %0 : i32
 }
 
 // CHECK-LABEL: @zexti4
 func.func @zexti4(%arg0 : vector<4xi1>) -> vector<4xi32> {
-  // CHECK: %[[ZERO:.+]] = spv.Constant dense<0> : vector<4xi32>
-  // CHECK: %[[ONE:.+]] = spv.Constant dense<1> : vector<4xi32>
-  // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xi32>
+  // CHECK: %[[ZERO:.+]] = spirv.Constant dense<0> : vector<4xi32>
+  // CHECK: %[[ONE:.+]] = spirv.Constant dense<1> : vector<4xi32>
+  // CHECK: spirv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xi32>
   %0 = arith.extui %arg0 : vector<4xi1> to vector<4xi32>
   return %0 : vector<4xi32>
 }
 
 // CHECK-LABEL: @zexti5
 func.func @zexti5(%arg0 : vector<4xi1>) -> vector<4xi64> {
-  // CHECK: %[[ZERO:.+]] = spv.Constant dense<0> : vector<4xi64>
-  // CHECK: %[[ONE:.+]] = spv.Constant dense<1> : vector<4xi64>
-  // CHECK: spv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xi64>
+  // CHECK: %[[ZERO:.+]] = spirv.Constant dense<0> : vector<4xi64>
+  // CHECK: %[[ONE:.+]] = spirv.Constant dense<1> : vector<4xi64>
+  // CHECK: spirv.Select %{{.*}}, %[[ONE]], %[[ZERO]] : vector<4xi1>, vector<4xi64>
   %0 = arith.extui %arg0 : vector<4xi1> to vector<4xi64>
   return %0 : vector<4xi64>
 }
 
 // CHECK-LABEL: @trunci1
 func.func @trunci1(%arg0 : i64) -> i16 {
-  // CHECK: spv.SConvert %{{.*}} : i64 to i16
+  // CHECK: spirv.SConvert %{{.*}} : i64 to i16
   %0 = arith.trunci %arg0 : i64 to i16
   return %0 : i16
 }
 
 // CHECK-LABEL: @trunci2
 func.func @trunci2(%arg0: i32) -> i16 {
-  // CHECK: spv.SConvert %{{.*}} : i32 to i16
+  // CHECK: spirv.SConvert %{{.*}} : i32 to i16
   %0 = arith.trunci %arg0 : i32 to i16
   return %0 : i16
 }
 
 // CHECK-LABEL: @trunc_to_i1
 func.func @trunc_to_i1(%arg0: i32) -> i1 {
-  // CHECK: %[[MASK:.*]] = spv.Constant 1 : i32
-  // CHECK: %[[MASKED_SRC:.*]] = spv.BitwiseAnd %{{.*}}, %[[MASK]] : i32
-  // CHECK: %[[IS_ONE:.*]] = spv.IEqual %[[MASKED_SRC]], %[[MASK]] : i32
-  // CHECK-DAG: %[[TRUE:.*]] = spv.Constant true
-  // CHECK-DAG: %[[FALSE:.*]] = spv.Constant false
-  // CHECK: spv.Select %[[IS_ONE]], %[[TRUE]], %[[FALSE]] : i1, i1
+  // CHECK: %[[MASK:.*]] = spirv.Constant 1 : i32
+  // CHECK: %[[MASKED_SRC:.*]] = spirv.BitwiseAnd %{{.*}}, %[[MASK]] : i32
+  // CHECK: %[[IS_ONE:.*]] = spirv.IEqual %[[MASKED_SRC]], %[[MASK]] : i32
+  // CHECK-DAG: %[[TRUE:.*]] = spirv.Constant true
+  // CHECK-DAG: %[[FALSE:.*]] = spirv.Constant false
+  // CHECK: spirv.Select %[[IS_ONE]], %[[TRUE]], %[[FALSE]] : i1, i1
   %0 = arith.trunci %arg0 : i32 to i1
   return %0 : i1
 }
 
 // CHECK-LABEL: @trunc_to_veci1
 func.func @trunc_to_veci1(%arg0: vector<4xi32>) -> vector<4xi1> {
-  // CHECK: %[[MASK:.*]] = spv.Constant dense<1> : vector<4xi32>
-  // CHECK: %[[MASKED_SRC:.*]] = spv.BitwiseAnd %{{.*}}, %[[MASK]] : vector<4xi32>
-  // CHECK: %[[IS_ONE:.*]] = spv.IEqual %[[MASKED_SRC]], %[[MASK]] : vector<4xi32>
-  // CHECK-DAG: %[[TRUE:.*]] = spv.Constant dense<true> : vector<4xi1>
-  // CHECK-DAG: %[[FALSE:.*]] = spv.Constant dense<false> : vector<4xi1>
-  // CHECK: spv.Select %[[IS_ONE]], %[[TRUE]], %[[FALSE]] : vector<4xi1>, vector<4xi1>
+  // CHECK: %[[MASK:.*]] = spirv.Constant dense<1> : vector<4xi32>
+  // CHECK: %[[MASKED_SRC:.*]] = spirv.BitwiseAnd %{{.*}}, %[[MASK]] : vector<4xi32>
+  // CHECK: %[[IS_ONE:.*]] = spirv.IEqual %[[MASKED_SRC]], %[[MASK]] : vector<4xi32>
+  // CHECK-DAG: %[[TRUE:.*]] = spirv.Constant dense<true> : vector<4xi1>
+  // CHECK-DAG: %[[FALSE:.*]] = spirv.Constant dense<false> : vector<4xi1>
+  // CHECK: spirv.Select %[[IS_ONE]], %[[TRUE]], %[[FALSE]] : vector<4xi1>, vector<4xi1>
   %0 = arith.trunci %arg0 : vector<4xi32> to vector<4xi1>
   return %0 : vector<4xi1>
 }
 
 // CHECK-LABEL: @fptosi1
 func.func @fptosi1(%arg0 : f32) -> i32 {
-  // CHECK: spv.ConvertFToS %{{.*}} : f32 to i32
+  // CHECK: spirv.ConvertFToS %{{.*}} : f32 to i32
   %0 = arith.fptosi %arg0 : f32 to i32
   return %0 : i32
 }
 
 // CHECK-LABEL: @fptosi2
 func.func @fptosi2(%arg0 : f16) -> i16 {
-  // CHECK: spv.ConvertFToS %{{.*}} : f16 to i16
+  // CHECK: spirv.ConvertFToS %{{.*}} : f16 to i16
   %0 = arith.fptosi %arg0 : f16 to i16
   return %0 : i16
 }
@@ -1907,14 +1907,14 @@ func.func @fptosi2(%arg0 : f16) -> i16 {
 // Checks that cast types will be adjusted when missing special capabilities for
 // certain non-32-bit scalar types.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [Float64], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Float64], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @fpext1
 // CHECK-SAME: %[[A:.*]]: f16
 func.func @fpext1(%arg0: f16) -> f64 {
   // CHECK: %[[ARG:.+]] = builtin.unrealized_conversion_cast %[[A]] : f16 to f32
-  // CHECK-NEXT: spv.FConvert %[[ARG]] : f32 to f64
+  // CHECK-NEXT: spirv.FConvert %[[ARG]] : f32 to f64
   %0 = arith.extf %arg0 : f16 to f64
   return %0: f64
 }
@@ -1922,7 +1922,7 @@ func.func @fpext1(%arg0: f16) -> f64 {
 // CHECK-LABEL: @fpext2
 // CHECK-SAME: %[[ARG:.*]]: f32
 func.func @fpext2(%arg0 : f32) -> f64 {
-  // CHECK-NEXT: spv.FConvert %[[ARG]] : f32 to f64
+  // CHECK-NEXT: spirv.FConvert %[[ARG]] : f32 to f64
   %0 = arith.extf %arg0 : f32 to f64
   return %0: f64
 }
@@ -1934,14 +1934,14 @@ func.func @fpext2(%arg0 : f32) -> f64 {
 // Checks that cast types will be adjusted when missing special capabilities for
 // certain non-32-bit scalar types.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [Float16], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Float16], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @fptrunc1
 // CHECK-SAME: %[[A:.*]]: f64
 func.func @fptrunc1(%arg0 : f64) -> f16 {
   // CHECK: %[[ARG:.+]] = builtin.unrealized_conversion_cast %[[A]] : f64 to f32
-  // CHECK-NEXT: spv.FConvert %[[ARG]] : f32 to f16
+  // CHECK-NEXT: spirv.FConvert %[[ARG]] : f32 to f16
   %0 = arith.truncf %arg0 : f64 to f16
   return %0: f16
 }
@@ -1949,14 +1949,14 @@ func.func @fptrunc1(%arg0 : f64) -> f16 {
 // CHECK-LABEL: @fptrunc2
 // CHECK-SAME: %[[ARG:.*]]: f32
 func.func @fptrunc2(%arg0: f32) -> f16 {
-  // CHECK-NEXT: spv.FConvert %[[ARG]] : f32 to f16
+  // CHECK-NEXT: spirv.FConvert %[[ARG]] : f32 to f16
   %0 = arith.truncf %arg0 : f32 to f16
   return %0: f16
 }
 
 // CHECK-LABEL: @sitofp
 func.func @sitofp(%arg0 : i64) -> f64 {
-  // CHECK: spv.ConvertSToF %{{.*}} : i32 to f32
+  // CHECK: spirv.ConvertSToF %{{.*}} : i32 to f32
   %0 = arith.sitofp %arg0 : i64 to f64
   return %0: f64
 }

diff  --git a/mlir/test/Conversion/ArithmeticToSPIRV/fast-math.mlir b/mlir/test/Conversion/ArithmeticToSPIRV/fast-math.mlir
index f6a57660a28e..7bac4894078d 100644
--- a/mlir/test/Conversion/ArithmeticToSPIRV/fast-math.mlir
+++ b/mlir/test/Conversion/ArithmeticToSPIRV/fast-math.mlir
@@ -1,13 +1,13 @@
 // RUN: mlir-opt -split-input-file -convert-arith-to-spirv=enable-fast-math -verify-diagnostics %s | FileCheck %s
 
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @cmpf_ordered
 // CHECK-SAME: %[[LHS:.+]]: f32, %[[RHS:.+]]: f32
 func.func @cmpf_ordered(%arg0 : f32, %arg1 : f32) -> i1 {
-  // CHECK: %[[T:.+]] = spv.Constant true
+  // CHECK: %[[T:.+]] = spirv.Constant true
   %0 = arith.cmpf ord, %arg0, %arg1 : f32
   // CHECK: return %[[T]]
   return %0: i1
@@ -16,7 +16,7 @@ func.func @cmpf_ordered(%arg0 : f32, %arg1 : f32) -> i1 {
 // CHECK-LABEL: @cmpf_unordered
 // CHECK-SAME: %[[LHS:.+]]: vector<4xf32>, %[[RHS:.+]]: vector<4xf32>
 func.func @cmpf_unordered(%arg0 : vector<4xf32>, %arg1 : vector<4xf32>) -> vector<4xi1> {
-  // CHECK: %[[F:.+]] = spv.Constant dense<false>
+  // CHECK: %[[F:.+]] = spirv.Constant dense<false>
   %0 = arith.cmpf uno, %arg0, %arg1 : vector<4xf32>
   // CHECK: return %[[F]]
   return %0: vector<4xi1>
@@ -27,13 +27,13 @@ func.func @cmpf_unordered(%arg0 : vector<4xf32>, %arg1 : vector<4xf32>) -> vecto
 // -----
 
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [Shader], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Shader], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @minf
 // CHECK-SAME: %[[LHS:.+]]: f32, %[[RHS:.+]]: f32
 func.func @minf(%arg0 : f32, %arg1 : f32) -> f32 {
-  // CHECK: %[[F:.+]] = spv.GL.FMin %[[LHS]], %[[RHS]]
+  // CHECK: %[[F:.+]] = spirv.GL.FMin %[[LHS]], %[[RHS]]
   %0 = arith.minf %arg0, %arg1 : f32
   // CHECK: return %[[F]]
   return %0: f32
@@ -42,7 +42,7 @@ func.func @minf(%arg0 : f32, %arg1 : f32) -> f32 {
 // CHECK-LABEL: @maxf
 // CHECK-SAME: %[[LHS:.+]]: vector<4xf32>, %[[RHS:.+]]: vector<4xf32>
 func.func @maxf(%arg0 : vector<4xf32>, %arg1 : vector<4xf32>) -> vector<4xf32> {
-  // CHECK: %[[F:.+]] = spv.GL.FMax %[[LHS]], %[[RHS]]
+  // CHECK: %[[F:.+]] = spirv.GL.FMax %[[LHS]], %[[RHS]]
   %0 = arith.maxf %arg0, %arg1 : vector<4xf32>
   // CHECK: return %[[F]]
   return %0: vector<4xf32>

diff  --git a/mlir/test/Conversion/ControlFlowToSPIRV/cf-ops-to-spirv.mlir b/mlir/test/Conversion/ControlFlowToSPIRV/cf-ops-to-spirv.mlir
index 7f62dd0acbfe..98ee68914534 100644
--- a/mlir/test/Conversion/ControlFlowToSPIRV/cf-ops-to-spirv.mlir
+++ b/mlir/test/Conversion/ControlFlowToSPIRV/cf-ops-to-spirv.mlir
@@ -5,27 +5,27 @@
 //===----------------------------------------------------------------------===//
 
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: func @simple_loop
 func.func @simple_loop(%begin: i32, %end: i32, %step: i32) {
-// CHECK-NEXT:  spv.Branch ^bb1
+// CHECK-NEXT:  spirv.Branch ^bb1
   cf.br ^bb1
 
 // CHECK-NEXT: ^bb1:    // pred: ^bb0
-// CHECK-NEXT:  spv.Branch ^bb2({{.*}} : i32)
+// CHECK-NEXT:  spirv.Branch ^bb2({{.*}} : i32)
 ^bb1:   // pred: ^bb0
   cf.br ^bb2(%begin : i32)
 
 // CHECK:      ^bb2({{.*}}: i32):       // 2 preds: ^bb1, ^bb3
-// CHECK:        spv.BranchConditional {{.*}}, ^bb3, ^bb4
+// CHECK:        spirv.BranchConditional {{.*}}, ^bb3, ^bb4
 ^bb2(%0: i32):        // 2 preds: ^bb1, ^bb3
   %1 = arith.cmpi slt, %0, %end : i32
   cf.cond_br %1, ^bb3, ^bb4
 
 // CHECK:      ^bb3:    // pred: ^bb2
-// CHECK:        spv.Branch ^bb2({{.*}} : i32)
+// CHECK:        spirv.Branch ^bb2({{.*}} : i32)
 ^bb3:   // pred: ^bb2
   %2 = arith.addi %0, %step : i32
   cf.br ^bb2(%2 : i32)

diff  --git a/mlir/test/Conversion/FuncToSPIRV/func-ops-to-spirv.mlir b/mlir/test/Conversion/FuncToSPIRV/func-ops-to-spirv.mlir
index b92343626bc2..759ab2d6c358 100644
--- a/mlir/test/Conversion/FuncToSPIRV/func-ops-to-spirv.mlir
+++ b/mlir/test/Conversion/FuncToSPIRV/func-ops-to-spirv.mlir
@@ -5,19 +5,19 @@
 //===----------------------------------------------------------------------===//
 
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
-// CHECK-LABEL: spv.func @return_none_val
+// CHECK-LABEL: spirv.func @return_none_val
 func.func @return_none_val() {
-  // CHECK: spv.Return
+  // CHECK: spirv.Return
   return
 }
 
-// CHECK-LABEL: spv.func @return_one_val
+// CHECK-LABEL: spirv.func @return_one_val
 //  CHECK-SAME: (%[[ARG:.+]]: f32)
 func.func @return_one_val(%arg0: f32) -> f32 {
-  // CHECK: spv.ReturnValue %[[ARG]] : f32
+  // CHECK: spirv.ReturnValue %[[ARG]] : f32
   return %arg0: f32
 }
 
@@ -28,21 +28,21 @@ func.func @return_multi_val(%arg0: f32) -> (f32, f32) {
   return %arg0, %arg0: f32, f32
 }
 
-// CHECK-LABEL: spv.func @return_one_index
+// CHECK-LABEL: spirv.func @return_one_index
 //  CHECK-SAME: (%[[ARG:.+]]: i32)
 func.func @return_one_index(%arg0: index) -> index {
-  // CHECK: spv.ReturnValue %[[ARG]] : i32
+  // CHECK: spirv.ReturnValue %[[ARG]] : i32
   return %arg0: index
 }
 
-// CHECK-LABEL: spv.func @call_functions
+// CHECK-LABEL: spirv.func @call_functions
 //  CHECK-SAME: (%[[ARG:.+]]: i32)
 func.func @call_functions(%arg0: index) -> index {
-  // CHECK: spv.FunctionCall @return_none_val() : () -> ()
+  // CHECK: spirv.FunctionCall @return_none_val() : () -> ()
   call @return_none_val(): () -> ()
-  // CHECK: {{%.*}} = spv.FunctionCall @return_one_index(%[[ARG]]) : (i32) -> i32
+  // CHECK: {{%.*}} = spirv.FunctionCall @return_one_index(%[[ARG]]) : (i32) -> i32
   %0 = call @return_one_index(%arg0): (index) -> index
-  // CHECK: spv.ReturnValue {{%.*}} : i32
+  // CHECK: spirv.ReturnValue {{%.*}} : i32
   return %0: index
 }
 

diff  --git a/mlir/test/Conversion/FuncToSPIRV/types-to-spirv.mlir b/mlir/test/Conversion/FuncToSPIRV/types-to-spirv.mlir
index 0d3b09402ce2..799d8c34aea1 100644
--- a/mlir/test/Conversion/FuncToSPIRV/types-to-spirv.mlir
+++ b/mlir/test/Conversion/FuncToSPIRV/types-to-spirv.mlir
@@ -8,10 +8,10 @@
 // Check that non-32-bit integer types are converted to 32-bit types if the
 // corresponding capabilities are not available.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
-// CHECK-LABEL: spv.func @integer8
+// CHECK-LABEL: spirv.func @integer8
 // CHECK-SAME: i32
 // CHECK-SAME: si32
 // CHECK-SAME: ui32
@@ -21,7 +21,7 @@ module attributes {
 // NOEMU-SAME: ui8
 func.func @integer8(%arg0: i8, %arg1: si8, %arg2: ui8) { return }
 
-// CHECK-LABEL: spv.func @integer16
+// CHECK-LABEL: spirv.func @integer16
 // CHECK-SAME: i32
 // CHECK-SAME: si32
 // CHECK-SAME: ui32
@@ -31,7 +31,7 @@ func.func @integer8(%arg0: i8, %arg1: si8, %arg2: ui8) { return }
 // NOEMU-SAME: ui16
 func.func @integer16(%arg0: i16, %arg1: si16, %arg2: ui16) { return }
 
-// CHECK-LABEL: spv.func @integer64
+// CHECK-LABEL: spirv.func @integer64
 // CHECK-SAME: i32
 // CHECK-SAME: si32
 // CHECK-SAME: ui32
@@ -48,34 +48,34 @@ func.func @integer64(%arg0: i64, %arg1: si64, %arg2: ui64) { return }
 // Check that non-32-bit integer types are kept untouched if the corresponding
 // capabilities are available.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [Int8, Int16, Int64], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Int8, Int16, Int64], []>, #spirv.resource_limits<>>
 } {
 
-// CHECK-LABEL: spv.func @integer8
+// CHECK-LABEL: spirv.func @integer8
 // CHECK-SAME: i8
 // CHECK-SAME: si8
 // CHECK-SAME: ui8
-// NOEMU-LABEL: spv.func @integer8
+// NOEMU-LABEL: spirv.func @integer8
 // NOEMU-SAME: i8
 // NOEMU-SAME: si8
 // NOEMU-SAME: ui8
 func.func @integer8(%arg0: i8, %arg1: si8, %arg2: ui8) { return }
 
-// CHECK-LABEL: spv.func @integer16
+// CHECK-LABEL: spirv.func @integer16
 // CHECK-SAME: i16
 // CHECK-SAME: si16
 // CHECK-SAME: ui16
-// NOEMU-LABEL: spv.func @integer16
+// NOEMU-LABEL: spirv.func @integer16
 // NOEMU-SAME: i16
 // NOEMU-SAME: si16
 // NOEMU-SAME: ui16
 func.func @integer16(%arg0: i16, %arg1: si16, %arg2: ui16) { return }
 
-// CHECK-LABEL: spv.func @integer64
+// CHECK-LABEL: spirv.func @integer64
 // CHECK-SAME: i64
 // CHECK-SAME: si64
 // CHECK-SAME: ui64
-// NOEMU-LABEL: spv.func @integer64
+// NOEMU-LABEL: spirv.func @integer64
 // NOEMU-SAME: i64
 // NOEMU-SAME: si64
 // NOEMU-SAME: ui64
@@ -87,16 +87,16 @@ func.func @integer64(%arg0: i64, %arg1: si64, %arg2: ui64) { return }
 
 // Check that weird bitwidths are not supported.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
-// CHECK-NOT: spv.func @integer4
+// CHECK-NOT: spirv.func @integer4
 func.func @integer4(%arg0: i4) { return }
 
-// CHECK-NOT: spv.func @integer128
+// CHECK-NOT: spirv.func @integer128
 func.func @integer128(%arg0: i128) { return }
 
-// CHECK-NOT: spv.func @integer42
+// CHECK-NOT: spirv.func @integer42
 func.func @integer42(%arg0: i42) { return }
 
 } // end module
@@ -108,10 +108,10 @@ func.func @integer42(%arg0: i42) { return }
 
 // The index type is always converted into i32.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
-// CHECK-LABEL: spv.func @index_type
+// CHECK-LABEL: spirv.func @index_type
 // CHECK-SAME: %{{.*}}: i32
 func.func @index_type(%arg0: index) { return }
 
@@ -126,16 +126,16 @@ func.func @index_type(%arg0: index) { return }
 // Check that non-32-bit float types are converted to 32-bit types if the
 // corresponding capabilities are not available.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
-// CHECK-LABEL: spv.func @float16
+// CHECK-LABEL: spirv.func @float16
 // CHECK-SAME: f32
 // NOEMU-LABEL: func @float16
 // NOEMU-SAME: f16
 func.func @float16(%arg0: f16) { return }
 
-// CHECK-LABEL: spv.func @float64
+// CHECK-LABEL: spirv.func @float64
 // CHECK-SAME: f32
 // NOEMU-LABEL: func @float64
 // NOEMU-SAME: f64
@@ -148,18 +148,18 @@ func.func @float64(%arg0: f64) { return }
 // Check that non-32-bit float types are kept untouched if the corresponding
 // capabilities are available.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [Float16, Float64], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Float16, Float64], []>, #spirv.resource_limits<>>
 } {
 
-// CHECK-LABEL: spv.func @float16
+// CHECK-LABEL: spirv.func @float16
 // CHECK-SAME: f16
-// NOEMU-LABEL: spv.func @float16
+// NOEMU-LABEL: spirv.func @float16
 // NOEMU-SAME: f16
 func.func @float16(%arg0: f16) { return }
 
-// CHECK-LABEL: spv.func @float64
+// CHECK-LABEL: spirv.func @float64
 // CHECK-SAME: f64
-// NOEMU-LABEL: spv.func @float64
+// NOEMU-LABEL: spirv.func @float64
 // NOEMU-SAME: f64
 func.func @float64(%arg0: f64) { return }
 
@@ -169,10 +169,10 @@ func.func @float64(%arg0: f64) { return }
 
 // Check that bf16 is not supported.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
-// CHECK-NOT: spv.func @bf16_type
+// CHECK-NOT: spirv.func @bf16_type
 func.func @bf16_type(%arg0: bf16) { return }
 
 } // end module
@@ -186,10 +186,10 @@ func.func @bf16_type(%arg0: bf16) { return }
 // Check that capabilities for scalar types affects vector types too: no special
 // capabilities available means using turning element types to 32-bit.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
-// CHECK-LABEL: spv.func @int_vector
+// CHECK-LABEL: spirv.func @int_vector
 // CHECK-SAME: vector<2xi32>
 // CHECK-SAME: vector<3xsi32>
 // CHECK-SAME: vector<4xui32>
@@ -199,7 +199,7 @@ func.func @int_vector(
   %arg2: vector<4xui64>
 ) { return }
 
-// CHECK-LABEL: spv.func @float_vector
+// CHECK-LABEL: spirv.func @float_vector
 // CHECK-SAME: vector<2xf32>
 // CHECK-SAME: vector<3xf32>
 func.func @float_vector(
@@ -214,11 +214,11 @@ func.func @float_vector(
 // Check that capabilities for scalar types affects vector types too: having
 // special capabilities means keep vector types untouched.
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>, #spirv.resource_limits<>>
 } {
 
-// CHECK-LABEL: spv.func @int_vector
+// CHECK-LABEL: spirv.func @int_vector
 // CHECK-SAME: vector<2xi8>
 // CHECK-SAME: vector<3xsi16>
 // CHECK-SAME: vector<4xui64>
@@ -228,7 +228,7 @@ func.func @int_vector(
   %arg2: vector<4xui64>
 ) { return }
 
-// CHECK-LABEL: spv.func @float_vector
+// CHECK-LABEL: spirv.func @float_vector
 // CHECK-SAME: vector<2xf16>
 // CHECK-SAME: vector<3xf64>
 func.func @float_vector(
@@ -236,11 +236,11 @@ func.func @float_vector(
   %arg1: vector<3xf64>
 ) { return }
 
-// CHECK-LABEL: spv.func @one_element_vector
+// CHECK-LABEL: spirv.func @one_element_vector
 // CHECK-SAME: %{{.+}}: i32
 func.func @one_element_vector(%arg0: vector<1xi32>) { return }
 
-// CHECK-LABEL: spv.func @zerod_vector
+// CHECK-LABEL: spirv.func @zerod_vector
 //  CHECK-SAME: %{{.+}}: f32
 func.func @zerod_vector(%arg0: vector<f32>) { return }
 
@@ -250,10 +250,10 @@ func.func @zerod_vector(%arg0: vector<f32>) { return }
 
 // Check that > 4-element vectors are not supported.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
-// CHECK-NOT: spv.func @large_vector
+// CHECK-NOT: spirv.func @large_vector
 func.func @large_vector(%arg0: vector<1024xi32>) { return }
 
 } // end module
@@ -266,8 +266,8 @@ func.func @large_vector(%arg0: vector<1024xi32>) { return }
 
 // Check memory spaces.
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [], [SPV_KHR_storage_buffer_storage_class]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: func @memref_mem_space
@@ -278,23 +278,23 @@ module attributes {
 // CHECK-SAME: Private
 // CHECK-SAME: Function
 func.func @memref_mem_space(
-    %arg0: memref<4xf32, #spv.storage_class<StorageBuffer>>,
-    %arg1: memref<4xf32, #spv.storage_class<Uniform>>,
-    %arg2: memref<4xf32, #spv.storage_class<Workgroup>>,
-    %arg3: memref<4xf32, #spv.storage_class<PushConstant>>,
-    %arg4: memref<4xf32, #spv.storage_class<Private>>,
-    %arg5: memref<4xf32, #spv.storage_class<Function>>
+    %arg0: memref<4xf32, #spirv.storage_class<StorageBuffer>>,
+    %arg1: memref<4xf32, #spirv.storage_class<Uniform>>,
+    %arg2: memref<4xf32, #spirv.storage_class<Workgroup>>,
+    %arg3: memref<4xf32, #spirv.storage_class<PushConstant>>,
+    %arg4: memref<4xf32, #spirv.storage_class<Private>>,
+    %arg5: memref<4xf32, #spirv.storage_class<Function>>
 ) { return }
 
 // CHECK-LABEL: func @memref_1bit_type
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<8 x i32, stride=4> [0])>, StorageBuffer>
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<8 x i32>)>, Function>
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<8 x i32, stride=4> [0])>, StorageBuffer>
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<8 x i32>)>, Function>
 // NOEMU-LABEL: func @memref_1bit_type
-// NOEMU-SAME: memref<4x8xi1, #spv.storage_class<StorageBuffer>>
-// NOEMU-SAME: memref<4x8xi1, #spv.storage_class<Function>>
+// NOEMU-SAME: memref<4x8xi1, #spirv.storage_class<StorageBuffer>>
+// NOEMU-SAME: memref<4x8xi1, #spirv.storage_class<Function>>
 func.func @memref_1bit_type(
-    %arg0: memref<4x8xi1, #spv.storage_class<StorageBuffer>>,
-    %arg1: memref<4x8xi1, #spv.storage_class<Function>>
+    %arg0: memref<4x8xi1, #spirv.storage_class<StorageBuffer>>,
+    %arg1: memref<4x8xi1, #spirv.storage_class<Function>>
 ) { return }
 
 } // end module
@@ -303,8 +303,8 @@ func.func @memref_1bit_type(
 
 // Reject memory spaces.
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [], [SPV_KHR_storage_buffer_storage_class]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: func @numeric_memref_mem_space1
@@ -327,93 +327,93 @@ func.func @numeric_memref_mem_space2(%arg0: memref<4xf32, 3>) { return }
 // requires special capability and extension: convert them to 32-bit if not
 // satisfied.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // An i1 is store in 8-bit, so 5xi1 has 40 bits, which is stored in 2xi32.
-// CHECK-LABEL: spv.func @memref_1bit_type
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<2 x i32, stride=4> [0])>, StorageBuffer>
+// CHECK-LABEL: spirv.func @memref_1bit_type
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<2 x i32, stride=4> [0])>, StorageBuffer>
 // NOEMU-LABEL: func @memref_1bit_type
-// NOEMU-SAME: memref<5xi1, #spv.storage_class<StorageBuffer>>
-func.func @memref_1bit_type(%arg0: memref<5xi1, #spv.storage_class<StorageBuffer>>) { return }
+// NOEMU-SAME: memref<5xi1, #spirv.storage_class<StorageBuffer>>
+func.func @memref_1bit_type(%arg0: memref<5xi1, #spirv.storage_class<StorageBuffer>>) { return }
 
-// CHECK-LABEL: spv.func @memref_8bit_StorageBuffer
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<4 x i32, stride=4> [0])>, StorageBuffer>
+// CHECK-LABEL: spirv.func @memref_8bit_StorageBuffer
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<4 x i32, stride=4> [0])>, StorageBuffer>
 // NOEMU-LABEL: func @memref_8bit_StorageBuffer
-// NOEMU-SAME: memref<16xi8, #spv.storage_class<StorageBuffer>>
-func.func @memref_8bit_StorageBuffer(%arg0: memref<16xi8, #spv.storage_class<StorageBuffer>>) { return }
+// NOEMU-SAME: memref<16xi8, #spirv.storage_class<StorageBuffer>>
+func.func @memref_8bit_StorageBuffer(%arg0: memref<16xi8, #spirv.storage_class<StorageBuffer>>) { return }
 
-// CHECK-LABEL: spv.func @memref_8bit_Uniform
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<4 x si32, stride=4> [0])>, Uniform>
+// CHECK-LABEL: spirv.func @memref_8bit_Uniform
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<4 x si32, stride=4> [0])>, Uniform>
 // NOEMU-LABEL: func @memref_8bit_Uniform
-// NOEMU-SAME: memref<16xsi8, #spv.storage_class<Uniform>>
-func.func @memref_8bit_Uniform(%arg0: memref<16xsi8, #spv.storage_class<Uniform>>) { return }
+// NOEMU-SAME: memref<16xsi8, #spirv.storage_class<Uniform>>
+func.func @memref_8bit_Uniform(%arg0: memref<16xsi8, #spirv.storage_class<Uniform>>) { return }
 
-// CHECK-LABEL: spv.func @memref_8bit_PushConstant
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<4 x ui32, stride=4> [0])>, PushConstant>
+// CHECK-LABEL: spirv.func @memref_8bit_PushConstant
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<4 x ui32, stride=4> [0])>, PushConstant>
 // NOEMU-LABEL: func @memref_8bit_PushConstant
-// NOEMU-SAME: memref<16xui8, #spv.storage_class<PushConstant>>
-func.func @memref_8bit_PushConstant(%arg0: memref<16xui8, #spv.storage_class<PushConstant>>) { return }
+// NOEMU-SAME: memref<16xui8, #spirv.storage_class<PushConstant>>
+func.func @memref_8bit_PushConstant(%arg0: memref<16xui8, #spirv.storage_class<PushConstant>>) { return }
 
-// CHECK-LABEL: spv.func @memref_16bit_StorageBuffer
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<8 x i32, stride=4> [0])>, StorageBuffer>
+// CHECK-LABEL: spirv.func @memref_16bit_StorageBuffer
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<8 x i32, stride=4> [0])>, StorageBuffer>
 // NOEMU-LABEL: func @memref_16bit_StorageBuffer
-// NOEMU-SAME: memref<16xi16, #spv.storage_class<StorageBuffer>>
-func.func @memref_16bit_StorageBuffer(%arg0: memref<16xi16, #spv.storage_class<StorageBuffer>>) { return }
+// NOEMU-SAME: memref<16xi16, #spirv.storage_class<StorageBuffer>>
+func.func @memref_16bit_StorageBuffer(%arg0: memref<16xi16, #spirv.storage_class<StorageBuffer>>) { return }
 
-// CHECK-LABEL: spv.func @memref_16bit_Uniform
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<8 x si32, stride=4> [0])>, Uniform>
+// CHECK-LABEL: spirv.func @memref_16bit_Uniform
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<8 x si32, stride=4> [0])>, Uniform>
 // NOEMU-LABEL: func @memref_16bit_Uniform
-// NOEMU-SAME: memref<16xsi16, #spv.storage_class<Uniform>>
-func.func @memref_16bit_Uniform(%arg0: memref<16xsi16, #spv.storage_class<Uniform>>) { return }
+// NOEMU-SAME: memref<16xsi16, #spirv.storage_class<Uniform>>
+func.func @memref_16bit_Uniform(%arg0: memref<16xsi16, #spirv.storage_class<Uniform>>) { return }
 
-// CHECK-LABEL: spv.func @memref_16bit_PushConstant
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<8 x ui32, stride=4> [0])>, PushConstant>
+// CHECK-LABEL: spirv.func @memref_16bit_PushConstant
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<8 x ui32, stride=4> [0])>, PushConstant>
 // NOEMU-LABEL: func @memref_16bit_PushConstant
-// NOEMU-SAME: memref<16xui16, #spv.storage_class<PushConstant>>
-func.func @memref_16bit_PushConstant(%arg0: memref<16xui16, #spv.storage_class<PushConstant>>) { return }
+// NOEMU-SAME: memref<16xui16, #spirv.storage_class<PushConstant>>
+func.func @memref_16bit_PushConstant(%arg0: memref<16xui16, #spirv.storage_class<PushConstant>>) { return }
 
-// CHECK-LABEL: spv.func @memref_16bit_Input
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<8 x f32>)>, Input>
+// CHECK-LABEL: spirv.func @memref_16bit_Input
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<8 x f32>)>, Input>
 // NOEMU-LABEL: func @memref_16bit_Input
-// NOEMU-SAME: memref<16xf16, #spv.storage_class<Input>>
-func.func @memref_16bit_Input(%arg3: memref<16xf16, #spv.storage_class<Input>>) { return }
+// NOEMU-SAME: memref<16xf16, #spirv.storage_class<Input>>
+func.func @memref_16bit_Input(%arg3: memref<16xf16, #spirv.storage_class<Input>>) { return }
 
-// CHECK-LABEL: spv.func @memref_16bit_Output
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<8 x f32>)>, Output>
+// CHECK-LABEL: spirv.func @memref_16bit_Output
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<8 x f32>)>, Output>
 // NOEMU-LABEL: func @memref_16bit_Output
-// NOEMU-SAME: memref<16xf16, #spv.storage_class<Output>>
-func.func @memref_16bit_Output(%arg4: memref<16xf16, #spv.storage_class<Output>>) { return }
+// NOEMU-SAME: memref<16xf16, #spirv.storage_class<Output>>
+func.func @memref_16bit_Output(%arg4: memref<16xf16, #spirv.storage_class<Output>>) { return }
 
-// CHECK-LABEL: spv.func @memref_64bit_StorageBuffer
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<32 x i32, stride=4> [0])>, StorageBuffer>
+// CHECK-LABEL: spirv.func @memref_64bit_StorageBuffer
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<32 x i32, stride=4> [0])>, StorageBuffer>
 // NOEMU-LABEL: func @memref_64bit_StorageBuffer
-// NOEMU-SAME: memref<16xi64, #spv.storage_class<StorageBuffer>>
-func.func @memref_64bit_StorageBuffer(%arg0: memref<16xi64, #spv.storage_class<StorageBuffer>>) { return }
+// NOEMU-SAME: memref<16xi64, #spirv.storage_class<StorageBuffer>>
+func.func @memref_64bit_StorageBuffer(%arg0: memref<16xi64, #spirv.storage_class<StorageBuffer>>) { return }
 
-// CHECK-LABEL: spv.func @memref_64bit_Uniform
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<32 x si32, stride=4> [0])>, Uniform>
+// CHECK-LABEL: spirv.func @memref_64bit_Uniform
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<32 x si32, stride=4> [0])>, Uniform>
 // NOEMU-LABEL: func @memref_64bit_Uniform
-// NOEMU-SAME: memref<16xsi64, #spv.storage_class<Uniform>>
-func.func @memref_64bit_Uniform(%arg0: memref<16xsi64, #spv.storage_class<Uniform>>) { return }
+// NOEMU-SAME: memref<16xsi64, #spirv.storage_class<Uniform>>
+func.func @memref_64bit_Uniform(%arg0: memref<16xsi64, #spirv.storage_class<Uniform>>) { return }
 
-// CHECK-LABEL: spv.func @memref_64bit_PushConstant
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<32 x ui32, stride=4> [0])>, PushConstant>
+// CHECK-LABEL: spirv.func @memref_64bit_PushConstant
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<32 x ui32, stride=4> [0])>, PushConstant>
 // NOEMU-LABEL: func @memref_64bit_PushConstant
-// NOEMU-SAME: memref<16xui64, #spv.storage_class<PushConstant>>
-func.func @memref_64bit_PushConstant(%arg0: memref<16xui64, #spv.storage_class<PushConstant>>) { return }
+// NOEMU-SAME: memref<16xui64, #spirv.storage_class<PushConstant>>
+func.func @memref_64bit_PushConstant(%arg0: memref<16xui64, #spirv.storage_class<PushConstant>>) { return }
 
-// CHECK-LABEL: spv.func @memref_64bit_Input
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<32 x f32>)>, Input>
+// CHECK-LABEL: spirv.func @memref_64bit_Input
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<32 x f32>)>, Input>
 // NOEMU-LABEL: func @memref_64bit_Input
-// NOEMU-SAME: memref<16xf64, #spv.storage_class<Input>>
-func.func @memref_64bit_Input(%arg3: memref<16xf64, #spv.storage_class<Input>>) { return }
+// NOEMU-SAME: memref<16xf64, #spirv.storage_class<Input>>
+func.func @memref_64bit_Input(%arg3: memref<16xf64, #spirv.storage_class<Input>>) { return }
 
-// CHECK-LABEL: spv.func @memref_64bit_Output
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<32 x f32>)>, Output>
+// CHECK-LABEL: spirv.func @memref_64bit_Output
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<32 x f32>)>, Output>
 // NOEMU-LABEL: func @memref_64bit_Output
-// NOEMU-SAME: memref<16xf64, #spv.storage_class<Output>>
-func.func @memref_64bit_Output(%arg4: memref<16xf64, #spv.storage_class<Output>>) { return }
+// NOEMU-SAME: memref<16xf64, #spirv.storage_class<Output>>
+func.func @memref_64bit_Output(%arg4: memref<16xf64, #spirv.storage_class<Output>>) { return }
 
 } // end module
 
@@ -423,37 +423,37 @@ func.func @memref_64bit_Output(%arg4: memref<16xf64, #spv.storage_class<Output>>
 // requires special capability and extension: keep as-is when the capability
 // and extension is available.
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [StoragePushConstant8, StoragePushConstant16, Int64, Float64],
-             [SPV_KHR_8bit_storage, SPV_KHR_16bit_storage]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [StoragePushConstant8, StoragePushConstant16, Int64, Float64],
+             [SPV_KHR_8bit_storage, SPV_KHR_16bit_storage]>, #spirv.resource_limits<>>
 } {
 
-// CHECK-LABEL: spv.func @memref_8bit_PushConstant
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, PushConstant>
-// NOEMU-LABEL: spv.func @memref_8bit_PushConstant
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, PushConstant>
-func.func @memref_8bit_PushConstant(%arg0: memref<16xi8, #spv.storage_class<PushConstant>>) { return }
-
-// CHECK-LABEL: spv.func @memref_16bit_PushConstant
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, PushConstant>
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2> [0])>, PushConstant>
-// NOEMU-LABEL: spv.func @memref_16bit_PushConstant
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, PushConstant>
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2> [0])>, PushConstant>
+// CHECK-LABEL: spirv.func @memref_8bit_PushConstant
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i8, stride=1> [0])>, PushConstant>
+// NOEMU-LABEL: spirv.func @memref_8bit_PushConstant
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i8, stride=1> [0])>, PushConstant>
+func.func @memref_8bit_PushConstant(%arg0: memref<16xi8, #spirv.storage_class<PushConstant>>) { return }
+
+// CHECK-LABEL: spirv.func @memref_16bit_PushConstant
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i16, stride=2> [0])>, PushConstant>
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x f16, stride=2> [0])>, PushConstant>
+// NOEMU-LABEL: spirv.func @memref_16bit_PushConstant
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i16, stride=2> [0])>, PushConstant>
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x f16, stride=2> [0])>, PushConstant>
 func.func @memref_16bit_PushConstant(
-  %arg0: memref<16xi16, #spv.storage_class<PushConstant>>,
-  %arg1: memref<16xf16, #spv.storage_class<PushConstant>>
+  %arg0: memref<16xi16, #spirv.storage_class<PushConstant>>,
+  %arg1: memref<16xf16, #spirv.storage_class<PushConstant>>
 ) { return }
 
-// CHECK-LABEL: spv.func @memref_64bit_PushConstant
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i64, stride=8> [0])>, PushConstant>
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f64, stride=8> [0])>, PushConstant>
-// NOEMU-LABEL: spv.func @memref_64bit_PushConstant
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i64, stride=8> [0])>, PushConstant>
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f64, stride=8> [0])>, PushConstant>
+// CHECK-LABEL: spirv.func @memref_64bit_PushConstant
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i64, stride=8> [0])>, PushConstant>
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x f64, stride=8> [0])>, PushConstant>
+// NOEMU-LABEL: spirv.func @memref_64bit_PushConstant
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i64, stride=8> [0])>, PushConstant>
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x f64, stride=8> [0])>, PushConstant>
 func.func @memref_64bit_PushConstant(
-  %arg0: memref<16xi64, #spv.storage_class<PushConstant>>,
-  %arg1: memref<16xf64, #spv.storage_class<PushConstant>>
+  %arg0: memref<16xi64, #spirv.storage_class<PushConstant>>,
+  %arg1: memref<16xf64, #spirv.storage_class<PushConstant>>
 ) { return }
 
 } // end module
@@ -464,37 +464,37 @@ func.func @memref_64bit_PushConstant(
 // requires special capability and extension: keep as-is when the capability
 // and extension is available.
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [StorageBuffer8BitAccess, StorageBuffer16BitAccess, Int64, Float64],
-             [SPV_KHR_8bit_storage, SPV_KHR_16bit_storage]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [StorageBuffer8BitAccess, StorageBuffer16BitAccess, Int64, Float64],
+             [SPV_KHR_8bit_storage, SPV_KHR_16bit_storage]>, #spirv.resource_limits<>>
 } {
 
-// CHECK-LABEL: spv.func @memref_8bit_StorageBuffer
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, StorageBuffer>
-// NOEMU-LABEL: spv.func @memref_8bit_StorageBuffer
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, StorageBuffer>
-func.func @memref_8bit_StorageBuffer(%arg0: memref<16xi8, #spv.storage_class<StorageBuffer>>) { return }
-
-// CHECK-LABEL: spv.func @memref_16bit_StorageBuffer
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, StorageBuffer>
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2> [0])>, StorageBuffer>
-// NOEMU-LABEL: spv.func @memref_16bit_StorageBuffer
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, StorageBuffer>
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2> [0])>, StorageBuffer>
+// CHECK-LABEL: spirv.func @memref_8bit_StorageBuffer
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i8, stride=1> [0])>, StorageBuffer>
+// NOEMU-LABEL: spirv.func @memref_8bit_StorageBuffer
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i8, stride=1> [0])>, StorageBuffer>
+func.func @memref_8bit_StorageBuffer(%arg0: memref<16xi8, #spirv.storage_class<StorageBuffer>>) { return }
+
+// CHECK-LABEL: spirv.func @memref_16bit_StorageBuffer
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i16, stride=2> [0])>, StorageBuffer>
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x f16, stride=2> [0])>, StorageBuffer>
+// NOEMU-LABEL: spirv.func @memref_16bit_StorageBuffer
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i16, stride=2> [0])>, StorageBuffer>
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x f16, stride=2> [0])>, StorageBuffer>
 func.func @memref_16bit_StorageBuffer(
-  %arg0: memref<16xi16, #spv.storage_class<StorageBuffer>>,
-  %arg1: memref<16xf16, #spv.storage_class<StorageBuffer>>
+  %arg0: memref<16xi16, #spirv.storage_class<StorageBuffer>>,
+  %arg1: memref<16xf16, #spirv.storage_class<StorageBuffer>>
 ) { return }
 
-// CHECK-LABEL: spv.func @memref_64bit_StorageBuffer
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i64, stride=8> [0])>, StorageBuffer>
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f64, stride=8> [0])>, StorageBuffer>
-// NOEMU-LABEL: spv.func @memref_64bit_StorageBuffer
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i64, stride=8> [0])>, StorageBuffer>
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f64, stride=8> [0])>, StorageBuffer>
+// CHECK-LABEL: spirv.func @memref_64bit_StorageBuffer
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i64, stride=8> [0])>, StorageBuffer>
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x f64, stride=8> [0])>, StorageBuffer>
+// NOEMU-LABEL: spirv.func @memref_64bit_StorageBuffer
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i64, stride=8> [0])>, StorageBuffer>
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x f64, stride=8> [0])>, StorageBuffer>
 func.func @memref_64bit_StorageBuffer(
-  %arg0: memref<16xi64, #spv.storage_class<StorageBuffer>>,
-  %arg1: memref<16xf64, #spv.storage_class<StorageBuffer>>
+  %arg0: memref<16xi64, #spirv.storage_class<StorageBuffer>>,
+  %arg1: memref<16xf64, #spirv.storage_class<StorageBuffer>>
 ) { return }
 
 } // end module
@@ -505,37 +505,37 @@ func.func @memref_64bit_StorageBuffer(
 // requires special capability and extension: keep as-is when the capability
 // and extension is available.
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [UniformAndStorageBuffer8BitAccess, StorageUniform16, Int64, Float64],
-             [SPV_KHR_8bit_storage, SPV_KHR_16bit_storage]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [UniformAndStorageBuffer8BitAccess, StorageUniform16, Int64, Float64],
+             [SPV_KHR_8bit_storage, SPV_KHR_16bit_storage]>, #spirv.resource_limits<>>
 } {
 
-// CHECK-LABEL: spv.func @memref_8bit_Uniform
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, Uniform>
-// NOEMU-LABEL: spv.func @memref_8bit_Uniform
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i8, stride=1> [0])>, Uniform>
-func.func @memref_8bit_Uniform(%arg0: memref<16xi8, #spv.storage_class<Uniform>>) { return }
-
-// CHECK-LABEL: spv.func @memref_16bit_Uniform
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, Uniform>
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2> [0])>, Uniform>
-// NOEMU-LABEL: spv.func @memref_16bit_Uniform
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16, stride=2> [0])>, Uniform>
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16, stride=2> [0])>, Uniform>
+// CHECK-LABEL: spirv.func @memref_8bit_Uniform
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i8, stride=1> [0])>, Uniform>
+// NOEMU-LABEL: spirv.func @memref_8bit_Uniform
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i8, stride=1> [0])>, Uniform>
+func.func @memref_8bit_Uniform(%arg0: memref<16xi8, #spirv.storage_class<Uniform>>) { return }
+
+// CHECK-LABEL: spirv.func @memref_16bit_Uniform
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i16, stride=2> [0])>, Uniform>
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x f16, stride=2> [0])>, Uniform>
+// NOEMU-LABEL: spirv.func @memref_16bit_Uniform
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i16, stride=2> [0])>, Uniform>
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x f16, stride=2> [0])>, Uniform>
 func.func @memref_16bit_Uniform(
-  %arg0: memref<16xi16, #spv.storage_class<Uniform>>,
-  %arg1: memref<16xf16, #spv.storage_class<Uniform>>
+  %arg0: memref<16xi16, #spirv.storage_class<Uniform>>,
+  %arg1: memref<16xf16, #spirv.storage_class<Uniform>>
 ) { return }
 
-// CHECK-LABEL: spv.func @memref_64bit_Uniform
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i64, stride=8> [0])>, Uniform>
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f64, stride=8> [0])>, Uniform>
-// NOEMU-LABEL: spv.func @memref_64bit_Uniform
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i64, stride=8> [0])>, Uniform>
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f64, stride=8> [0])>, Uniform>
+// CHECK-LABEL: spirv.func @memref_64bit_Uniform
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i64, stride=8> [0])>, Uniform>
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x f64, stride=8> [0])>, Uniform>
+// NOEMU-LABEL: spirv.func @memref_64bit_Uniform
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i64, stride=8> [0])>, Uniform>
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x f64, stride=8> [0])>, Uniform>
 func.func @memref_64bit_Uniform(
-  %arg0: memref<16xi64, #spv.storage_class<Uniform>>,
-  %arg1: memref<16xf64, #spv.storage_class<Uniform>>
+  %arg0: memref<16xi64, #spirv.storage_class<Uniform>>,
+  %arg1: memref<16xf64, #spirv.storage_class<Uniform>>
 ) { return }
 
 } // end module
@@ -546,42 +546,42 @@ func.func @memref_64bit_Uniform(
 // requires special capability and extension: keep as-is when the capability
 // and extension is available.
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [StorageInputOutput16, Int64, Float64], [SPV_KHR_16bit_storage]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [StorageInputOutput16, Int64, Float64], [SPV_KHR_16bit_storage]>, #spirv.resource_limits<>>
 } {
 
-// CHECK-LABEL: spv.func @memref_16bit_Input
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16>)>, Input>
-// NOEMU-LABEL: spv.func @memref_16bit_Input
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f16>)>, Input>
-func.func @memref_16bit_Input(%arg3: memref<16xf16, #spv.storage_class<Input>>) { return }
-
-// CHECK-LABEL: spv.func @memref_16bit_Output
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16>)>, Output>
-// NOEMU-LABEL: spv.func @memref_16bit_Output
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i16>)>, Output>
-func.func @memref_16bit_Output(%arg4: memref<16xi16, #spv.storage_class<Output>>) { return }
-
-// CHECK-LABEL: spv.func @memref_64bit_Input
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i64>)>, Input>
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f64>)>, Input>
-// NOEMU-LABEL: spv.func @memref_64bit_Input
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i64>)>, Input>
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f64>)>, Input>
+// CHECK-LABEL: spirv.func @memref_16bit_Input
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x f16>)>, Input>
+// NOEMU-LABEL: spirv.func @memref_16bit_Input
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x f16>)>, Input>
+func.func @memref_16bit_Input(%arg3: memref<16xf16, #spirv.storage_class<Input>>) { return }
+
+// CHECK-LABEL: spirv.func @memref_16bit_Output
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i16>)>, Output>
+// NOEMU-LABEL: spirv.func @memref_16bit_Output
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i16>)>, Output>
+func.func @memref_16bit_Output(%arg4: memref<16xi16, #spirv.storage_class<Output>>) { return }
+
+// CHECK-LABEL: spirv.func @memref_64bit_Input
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i64>)>, Input>
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x f64>)>, Input>
+// NOEMU-LABEL: spirv.func @memref_64bit_Input
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i64>)>, Input>
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x f64>)>, Input>
 func.func @memref_64bit_Input(
-  %arg0: memref<16xi64, #spv.storage_class<Input>>,
-  %arg1: memref<16xf64, #spv.storage_class<Input>>
+  %arg0: memref<16xi64, #spirv.storage_class<Input>>,
+  %arg1: memref<16xf64, #spirv.storage_class<Input>>
 ) { return }
 
-// CHECK-LABEL: spv.func @memref_64bit_Output
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i64>)>, Output>
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f64>)>, Output>
-// NOEMU-LABEL: spv.func @memref_64bit_Output
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x i64>)>, Output>
-// NOEMU-SAME: !spv.ptr<!spv.struct<(!spv.array<16 x f64>)>, Output>
+// CHECK-LABEL: spirv.func @memref_64bit_Output
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i64>)>, Output>
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x f64>)>, Output>
+// NOEMU-LABEL: spirv.func @memref_64bit_Output
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x i64>)>, Output>
+// NOEMU-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<16 x f64>)>, Output>
 func.func @memref_64bit_Output(
-  %arg0: memref<16xi64, #spv.storage_class<Output>>,
-  %arg1: memref<16xf64, #spv.storage_class<Output>>
+  %arg0: memref<16xi64, #spirv.storage_class<Output>>,
+  %arg1: memref<16xf64, #spirv.storage_class<Output>>
 ) { return }
 
 } // end module
@@ -590,33 +590,33 @@ func.func @memref_64bit_Output(
 
 // Check that memref offset and strides affect the array size.
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [StorageBuffer16BitAccess], [SPV_KHR_16bit_storage]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [StorageBuffer16BitAccess], [SPV_KHR_16bit_storage]>, #spirv.resource_limits<>>
 } {
 
-// CHECK-LABEL: spv.func @memref_offset_strides
+// CHECK-LABEL: spirv.func @memref_offset_strides
 func.func @memref_offset_strides(
-// CHECK-SAME: !spv.array<64 x f32, stride=4> [0])>, StorageBuffer>
-// CHECK-SAME: !spv.array<72 x f32, stride=4> [0])>, StorageBuffer>
-// CHECK-SAME: !spv.array<256 x f32, stride=4> [0])>, StorageBuffer>
-// CHECK-SAME: !spv.array<64 x f32, stride=4> [0])>, StorageBuffer>
-// CHECK-SAME: !spv.array<88 x f32, stride=4> [0])>, StorageBuffer>
-  %arg0: memref<16x4xf32, strided<[4, 1], offset: 0>, #spv.storage_class<StorageBuffer>>,  // tightly packed; row major
-  %arg1: memref<16x4xf32, strided<[4, 1], offset: 8>, #spv.storage_class<StorageBuffer>>,  // offset 8
-  %arg2: memref<16x4xf32, strided<[16, 1], offset: 0>, #spv.storage_class<StorageBuffer>>, // pad 12 after each row
-  %arg3: memref<16x4xf32, strided<[1, 16], offset: 0>, #spv.storage_class<StorageBuffer>>, // tightly packed; col major
-  %arg4: memref<16x4xf32, strided<[1, 22], offset: 0>, #spv.storage_class<StorageBuffer>>, // pad 4 after each col
-
-// CHECK-SAME: !spv.array<64 x f16, stride=2> [0])>, StorageBuffer>
-// CHECK-SAME: !spv.array<72 x f16, stride=2> [0])>, StorageBuffer>
-// CHECK-SAME: !spv.array<256 x f16, stride=2> [0])>, StorageBuffer>
-// CHECK-SAME: !spv.array<64 x f16, stride=2> [0])>, StorageBuffer>
-// CHECK-SAME: !spv.array<88 x f16, stride=2> [0])>, StorageBuffer>
-  %arg5: memref<16x4xf16, strided<[4, 1], offset: 0>, #spv.storage_class<StorageBuffer>>,
-  %arg6: memref<16x4xf16, strided<[4, 1], offset: 8>, #spv.storage_class<StorageBuffer>>,
-  %arg7: memref<16x4xf16, strided<[16, 1], offset: 0>, #spv.storage_class<StorageBuffer>>,
-  %arg8: memref<16x4xf16, strided<[1, 16], offset: 0>, #spv.storage_class<StorageBuffer>>,
-  %arg9: memref<16x4xf16, strided<[1, 22], offset: 0>, #spv.storage_class<StorageBuffer>>
+// CHECK-SAME: !spirv.array<64 x f32, stride=4> [0])>, StorageBuffer>
+// CHECK-SAME: !spirv.array<72 x f32, stride=4> [0])>, StorageBuffer>
+// CHECK-SAME: !spirv.array<256 x f32, stride=4> [0])>, StorageBuffer>
+// CHECK-SAME: !spirv.array<64 x f32, stride=4> [0])>, StorageBuffer>
+// CHECK-SAME: !spirv.array<88 x f32, stride=4> [0])>, StorageBuffer>
+  %arg0: memref<16x4xf32, strided<[4, 1], offset: 0>, #spirv.storage_class<StorageBuffer>>,  // tightly packed; row major
+  %arg1: memref<16x4xf32, strided<[4, 1], offset: 8>, #spirv.storage_class<StorageBuffer>>,  // offset 8
+  %arg2: memref<16x4xf32, strided<[16, 1], offset: 0>, #spirv.storage_class<StorageBuffer>>, // pad 12 after each row
+  %arg3: memref<16x4xf32, strided<[1, 16], offset: 0>, #spirv.storage_class<StorageBuffer>>, // tightly packed; col major
+  %arg4: memref<16x4xf32, strided<[1, 22], offset: 0>, #spirv.storage_class<StorageBuffer>>, // pad 4 after each col
+
+// CHECK-SAME: !spirv.array<64 x f16, stride=2> [0])>, StorageBuffer>
+// CHECK-SAME: !spirv.array<72 x f16, stride=2> [0])>, StorageBuffer>
+// CHECK-SAME: !spirv.array<256 x f16, stride=2> [0])>, StorageBuffer>
+// CHECK-SAME: !spirv.array<64 x f16, stride=2> [0])>, StorageBuffer>
+// CHECK-SAME: !spirv.array<88 x f16, stride=2> [0])>, StorageBuffer>
+  %arg5: memref<16x4xf16, strided<[4, 1], offset: 0>, #spirv.storage_class<StorageBuffer>>,
+  %arg6: memref<16x4xf16, strided<[4, 1], offset: 8>, #spirv.storage_class<StorageBuffer>>,
+  %arg7: memref<16x4xf16, strided<[16, 1], offset: 0>, #spirv.storage_class<StorageBuffer>>,
+  %arg8: memref<16x4xf16, strided<[1, 16], offset: 0>, #spirv.storage_class<StorageBuffer>>,
+  %arg9: memref<16x4xf16, strided<[1, 22], offset: 0>, #spirv.storage_class<StorageBuffer>>
 ) { return }
 
 } // end module
@@ -625,7 +625,7 @@ func.func @memref_offset_strides(
 
 // Dynamic shapes
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // Check that unranked shapes are not supported.
@@ -634,69 +634,69 @@ module attributes {
 func.func @unranked_memref(%arg0: memref<*xi32>) { return }
 
 // CHECK-LABEL: func @memref_1bit_type
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<i32, stride=4> [0])>, StorageBuffer>
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.rtarray<i32, stride=4> [0])>, StorageBuffer>
 // NOEMU-LABEL: func @memref_1bit_type
-// NOEMU-SAME: memref<?xi1, #spv.storage_class<StorageBuffer>>
-func.func @memref_1bit_type(%arg0: memref<?xi1, #spv.storage_class<StorageBuffer>>) { return }
+// NOEMU-SAME: memref<?xi1, #spirv.storage_class<StorageBuffer>>
+func.func @memref_1bit_type(%arg0: memref<?xi1, #spirv.storage_class<StorageBuffer>>) { return }
 
 // CHECK-LABEL: func @dynamic_dim_memref
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<i32, stride=4> [0])>, StorageBuffer>
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<f32, stride=4> [0])>, StorageBuffer>
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.rtarray<i32, stride=4> [0])>, StorageBuffer>
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.rtarray<f32, stride=4> [0])>, StorageBuffer>
 func.func @dynamic_dim_memref(
-    %arg0: memref<8x?xi32, #spv.storage_class<StorageBuffer>>,
-    %arg1: memref<?x?xf32, #spv.storage_class<StorageBuffer>>) { return }
+    %arg0: memref<8x?xi32, #spirv.storage_class<StorageBuffer>>,
+    %arg1: memref<?x?xf32, #spirv.storage_class<StorageBuffer>>) { return }
 
 // Check that using non-32-bit scalar types in interface storage classes
 // requires special capability and extension: convert them to 32-bit if not
 // satisfied.
 
-// CHECK-LABEL: spv.func @memref_8bit_StorageBuffer
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<i32, stride=4> [0])>, StorageBuffer>
+// CHECK-LABEL: spirv.func @memref_8bit_StorageBuffer
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.rtarray<i32, stride=4> [0])>, StorageBuffer>
 // NOEMU-LABEL: func @memref_8bit_StorageBuffer
-// NOEMU-SAME: memref<?xi8, #spv.storage_class<StorageBuffer>>
-func.func @memref_8bit_StorageBuffer(%arg0: memref<?xi8, #spv.storage_class<StorageBuffer>>) { return }
+// NOEMU-SAME: memref<?xi8, #spirv.storage_class<StorageBuffer>>
+func.func @memref_8bit_StorageBuffer(%arg0: memref<?xi8, #spirv.storage_class<StorageBuffer>>) { return }
 
-// CHECK-LABEL: spv.func @memref_8bit_Uniform
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<si32, stride=4> [0])>, Uniform>
+// CHECK-LABEL: spirv.func @memref_8bit_Uniform
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.rtarray<si32, stride=4> [0])>, Uniform>
 // NOEMU-LABEL: func @memref_8bit_Uniform
-// NOEMU-SAME: memref<?xsi8, #spv.storage_class<Uniform>>
-func.func @memref_8bit_Uniform(%arg0: memref<?xsi8, #spv.storage_class<Uniform>>) { return }
+// NOEMU-SAME: memref<?xsi8, #spirv.storage_class<Uniform>>
+func.func @memref_8bit_Uniform(%arg0: memref<?xsi8, #spirv.storage_class<Uniform>>) { return }
 
-// CHECK-LABEL: spv.func @memref_8bit_PushConstant
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<ui32, stride=4> [0])>, PushConstant>
+// CHECK-LABEL: spirv.func @memref_8bit_PushConstant
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.rtarray<ui32, stride=4> [0])>, PushConstant>
 // NOEMU-LABEL: func @memref_8bit_PushConstant
-// NOEMU-SAME: memref<?xui8, #spv.storage_class<PushConstant>>
-func.func @memref_8bit_PushConstant(%arg0: memref<?xui8, #spv.storage_class<PushConstant>>) { return }
+// NOEMU-SAME: memref<?xui8, #spirv.storage_class<PushConstant>>
+func.func @memref_8bit_PushConstant(%arg0: memref<?xui8, #spirv.storage_class<PushConstant>>) { return }
 
-// CHECK-LABEL: spv.func @memref_16bit_StorageBuffer
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<i32, stride=4> [0])>, StorageBuffer>
+// CHECK-LABEL: spirv.func @memref_16bit_StorageBuffer
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.rtarray<i32, stride=4> [0])>, StorageBuffer>
 // NOEMU-LABEL: func @memref_16bit_StorageBuffer
-// NOEMU-SAME: memref<?xi16, #spv.storage_class<StorageBuffer>>
-func.func @memref_16bit_StorageBuffer(%arg0: memref<?xi16, #spv.storage_class<StorageBuffer>>) { return }
+// NOEMU-SAME: memref<?xi16, #spirv.storage_class<StorageBuffer>>
+func.func @memref_16bit_StorageBuffer(%arg0: memref<?xi16, #spirv.storage_class<StorageBuffer>>) { return }
 
-// CHECK-LABEL: spv.func @memref_16bit_Uniform
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<si32, stride=4> [0])>, Uniform>
+// CHECK-LABEL: spirv.func @memref_16bit_Uniform
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.rtarray<si32, stride=4> [0])>, Uniform>
 // NOEMU-LABEL: func @memref_16bit_Uniform
-// NOEMU-SAME: memref<?xsi16, #spv.storage_class<Uniform>>
-func.func @memref_16bit_Uniform(%arg0: memref<?xsi16, #spv.storage_class<Uniform>>) { return }
+// NOEMU-SAME: memref<?xsi16, #spirv.storage_class<Uniform>>
+func.func @memref_16bit_Uniform(%arg0: memref<?xsi16, #spirv.storage_class<Uniform>>) { return }
 
-// CHECK-LABEL: spv.func @memref_16bit_PushConstant
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<ui32, stride=4> [0])>, PushConstant>
+// CHECK-LABEL: spirv.func @memref_16bit_PushConstant
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.rtarray<ui32, stride=4> [0])>, PushConstant>
 // NOEMU-LABEL: func @memref_16bit_PushConstant
-// NOEMU-SAME: memref<?xui16, #spv.storage_class<PushConstant>>
-func.func @memref_16bit_PushConstant(%arg0: memref<?xui16, #spv.storage_class<PushConstant>>) { return }
+// NOEMU-SAME: memref<?xui16, #spirv.storage_class<PushConstant>>
+func.func @memref_16bit_PushConstant(%arg0: memref<?xui16, #spirv.storage_class<PushConstant>>) { return }
 
-// CHECK-LABEL: spv.func @memref_16bit_Input
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<f32>)>, Input>
+// CHECK-LABEL: spirv.func @memref_16bit_Input
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.rtarray<f32>)>, Input>
 // NOEMU-LABEL: func @memref_16bit_Input
-// NOEMU-SAME: memref<?xf16, #spv.storage_class<Input>>
-func.func @memref_16bit_Input(%arg3: memref<?xf16, #spv.storage_class<Input>>) { return }
+// NOEMU-SAME: memref<?xf16, #spirv.storage_class<Input>>
+func.func @memref_16bit_Input(%arg3: memref<?xf16, #spirv.storage_class<Input>>) { return }
 
-// CHECK-LABEL: spv.func @memref_16bit_Output
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<f32>)>, Output>
+// CHECK-LABEL: spirv.func @memref_16bit_Output
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.rtarray<f32>)>, Output>
 // NOEMU-LABEL: func @memref_16bit_Output
-// NOEMU-SAME: memref<?xf16, #spv.storage_class<Output>>
-func.func @memref_16bit_Output(%arg4: memref<?xf16, #spv.storage_class<Output>>) { return }
+// NOEMU-SAME: memref<?xf16, #spirv.storage_class<Output>>
+func.func @memref_16bit_Output(%arg4: memref<?xf16, #spirv.storage_class<Output>>) { return }
 
 } // end module
 
@@ -704,23 +704,23 @@ func.func @memref_16bit_Output(%arg4: memref<?xf16, #spv.storage_class<Output>>)
 
 // Vector types
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: func @memref_vector
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<4 x vector<2xf32>, stride=8> [0])>, StorageBuffer>
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<4 x vector<4xf32>, stride=16> [0])>, Uniform>
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<4 x vector<2xf32>, stride=8> [0])>, StorageBuffer>
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<4 x vector<4xf32>, stride=16> [0])>, Uniform>
 func.func @memref_vector(
-    %arg0: memref<4xvector<2xf32>, #spv.storage_class<StorageBuffer>>,
-    %arg1: memref<4xvector<4xf32>, #spv.storage_class<Uniform>>)
+    %arg0: memref<4xvector<2xf32>, #spirv.storage_class<StorageBuffer>>,
+    %arg1: memref<4xvector<4xf32>, #spirv.storage_class<Uniform>>)
 { return }
 
 // CHECK-LABEL: func @dynamic_dim_memref_vector
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<vector<4xi32>, stride=16> [0])>, StorageBuffer>
-// CHECK-SAME: !spv.ptr<!spv.struct<(!spv.rtarray<vector<2xf32>, stride=8> [0])>, StorageBuffer>
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.rtarray<vector<4xi32>, stride=16> [0])>, StorageBuffer>
+// CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.rtarray<vector<2xf32>, stride=8> [0])>, StorageBuffer>
 func.func @dynamic_dim_memref_vector(
-    %arg0: memref<8x?xvector<4xi32>, #spv.storage_class<StorageBuffer>>,
-    %arg1: memref<?x?xvector<2xf32>, #spv.storage_class<StorageBuffer>>)
+    %arg0: memref<8x?xvector<4xi32>, #spirv.storage_class<StorageBuffer>>,
+    %arg1: memref<?x?xvector<2xf32>, #spirv.storage_class<StorageBuffer>>)
 { return }
 
 } // end module
@@ -729,13 +729,13 @@ func.func @dynamic_dim_memref_vector(
 
 // Vector types, check that sizes not available in SPIR-V are not transformed.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: func @memref_vector_wrong_size
-// CHECK-SAME: memref<4xvector<5xf32>, #spv.storage_class<StorageBuffer>>
+// CHECK-SAME: memref<4xvector<5xf32>, #spirv.storage_class<StorageBuffer>>
 func.func @memref_vector_wrong_size(
-    %arg0: memref<4xvector<5xf32>, #spv.storage_class<StorageBuffer>>)
+    %arg0: memref<4xvector<5xf32>, #spirv.storage_class<StorageBuffer>>)
 { return }
 
 } // end module
@@ -748,15 +748,15 @@ func.func @memref_vector_wrong_size(
 
 // Check that tensor element types are kept untouched with proper capabilities.
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Int8, Int16, Int64, Float16, Float64], []>, #spirv.resource_limits<>>
 } {
 
-// CHECK-LABEL: spv.func @int_tensor_types
-// CHECK-SAME: !spv.array<32 x i64>
-// CHECK-SAME: !spv.array<32 x i32>
-// CHECK-SAME: !spv.array<32 x i16>
-// CHECK-SAME: !spv.array<32 x i8>
+// CHECK-LABEL: spirv.func @int_tensor_types
+// CHECK-SAME: !spirv.array<32 x i64>
+// CHECK-SAME: !spirv.array<32 x i32>
+// CHECK-SAME: !spirv.array<32 x i16>
+// CHECK-SAME: !spirv.array<32 x i8>
 func.func @int_tensor_types(
   %arg0: tensor<8x4xi64>,
   %arg1: tensor<8x4xi32>,
@@ -764,10 +764,10 @@ func.func @int_tensor_types(
   %arg3: tensor<8x4xi8>
 ) { return }
 
-// CHECK-LABEL: spv.func @float_tensor_types
-// CHECK-SAME: !spv.array<32 x f64>
-// CHECK-SAME: !spv.array<32 x f32>
-// CHECK-SAME: !spv.array<32 x f16>
+// CHECK-LABEL: spirv.func @float_tensor_types
+// CHECK-SAME: !spirv.array<32 x f64>
+// CHECK-SAME: !spirv.array<32 x f32>
+// CHECK-SAME: !spirv.array<32 x f16>
 func.func @float_tensor_types(
   %arg0: tensor<8x4xf64>,
   %arg1: tensor<8x4xf32>,
@@ -780,14 +780,14 @@ func.func @float_tensor_types(
 
 // Check that tensor element types are changed to 32-bit without capabilities.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
-// CHECK-LABEL: spv.func @int_tensor_types
-// CHECK-SAME: !spv.array<32 x i32>
-// CHECK-SAME: !spv.array<32 x i32>
-// CHECK-SAME: !spv.array<32 x i32>
-// CHECK-SAME: !spv.array<32 x i32>
+// CHECK-LABEL: spirv.func @int_tensor_types
+// CHECK-SAME: !spirv.array<32 x i32>
+// CHECK-SAME: !spirv.array<32 x i32>
+// CHECK-SAME: !spirv.array<32 x i32>
+// CHECK-SAME: !spirv.array<32 x i32>
 func.func @int_tensor_types(
   %arg0: tensor<8x4xi64>,
   %arg1: tensor<8x4xi32>,
@@ -795,10 +795,10 @@ func.func @int_tensor_types(
   %arg3: tensor<8x4xi8>
 ) { return }
 
-// CHECK-LABEL: spv.func @float_tensor_types
-// CHECK-SAME: !spv.array<32 x f32>
-// CHECK-SAME: !spv.array<32 x f32>
-// CHECK-SAME: !spv.array<32 x f32>
+// CHECK-LABEL: spirv.func @float_tensor_types
+// CHECK-SAME: !spirv.array<32 x f32>
+// CHECK-SAME: !spirv.array<32 x f32>
+// CHECK-SAME: !spirv.array<32 x f32>
 func.func @float_tensor_types(
   %arg0: tensor<8x4xf64>,
   %arg1: tensor<8x4xf32>,
@@ -811,7 +811,7 @@ func.func @float_tensor_types(
 
 // Check that dynamic shapes are not supported.
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: func @unranked_tensor

diff  --git a/mlir/test/Conversion/GPUToSPIRV/builtins.mlir b/mlir/test/Conversion/GPUToSPIRV/builtins.mlir
index bcb819a64a53..6414d292b04e 100644
--- a/mlir/test/Conversion/GPUToSPIRV/builtins.mlir
+++ b/mlir/test/Conversion/GPUToSPIRV/builtins.mlir
@@ -8,14 +8,14 @@ module attributes {gpu.container_module} {
     return
   }
 
-  // CHECK-LABEL:  spv.module @{{.*}} Logical GLSL450
-  // CHECK: spv.GlobalVariable [[WORKGROUPID:@.*]] built_in("WorkgroupId")
+  // CHECK-LABEL:  spirv.module @{{.*}} Logical GLSL450
+  // CHECK: spirv.GlobalVariable [[WORKGROUPID:@.*]] built_in("WorkgroupId")
   gpu.module @kernels {
     gpu.func @builtin_workgroup_id_x() kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
-      // CHECK: [[ADDRESS:%.*]] = spv.mlir.addressof [[WORKGROUPID]]
-      // CHECK-NEXT: [[VEC:%.*]] = spv.Load "Input" [[ADDRESS]]
-      // CHECK-NEXT: {{%.*}} = spv.CompositeExtract [[VEC]]{{\[}}0 : i32{{\]}}
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
+      // CHECK: [[ADDRESS:%.*]] = spirv.mlir.addressof [[WORKGROUPID]]
+      // CHECK-NEXT: [[VEC:%.*]] = spirv.Load "Input" [[ADDRESS]]
+      // CHECK-NEXT: {{%.*}} = spirv.CompositeExtract [[VEC]]{{\[}}0 : i32{{\]}}
       %0 = gpu.block_id x
       gpu.return
     }
@@ -34,14 +34,14 @@ module attributes {gpu.container_module} {
     return
   }
 
-  // CHECK-LABEL:  spv.module @{{.*}} Logical GLSL450
-  // CHECK: spv.GlobalVariable [[WORKGROUPID:@.*]] built_in("WorkgroupId")
+  // CHECK-LABEL:  spirv.module @{{.*}} Logical GLSL450
+  // CHECK: spirv.GlobalVariable [[WORKGROUPID:@.*]] built_in("WorkgroupId")
   gpu.module @kernels {
     gpu.func @builtin_workgroup_id_y() kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
-      // CHECK: [[ADDRESS:%.*]] = spv.mlir.addressof [[WORKGROUPID]]
-      // CHECK-NEXT: [[VEC:%.*]] = spv.Load "Input" [[ADDRESS]]
-      // CHECK-NEXT: {{%.*}} = spv.CompositeExtract [[VEC]]{{\[}}1 : i32{{\]}}
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
+      // CHECK: [[ADDRESS:%.*]] = spirv.mlir.addressof [[WORKGROUPID]]
+      // CHECK-NEXT: [[VEC:%.*]] = spirv.Load "Input" [[ADDRESS]]
+      // CHECK-NEXT: {{%.*}} = spirv.CompositeExtract [[VEC]]{{\[}}1 : i32{{\]}}
       %0 = gpu.block_id y
       gpu.return
     }
@@ -58,14 +58,14 @@ module attributes {gpu.container_module} {
     return
   }
 
-  // CHECK-LABEL:  spv.module @{{.*}} Logical GLSL450
-  // CHECK: spv.GlobalVariable [[WORKGROUPID:@.*]] built_in("WorkgroupId")
+  // CHECK-LABEL:  spirv.module @{{.*}} Logical GLSL450
+  // CHECK: spirv.GlobalVariable [[WORKGROUPID:@.*]] built_in("WorkgroupId")
   gpu.module @kernels {
     gpu.func @builtin_workgroup_id_z() kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
-      // CHECK: [[ADDRESS:%.*]] = spv.mlir.addressof [[WORKGROUPID]]
-      // CHECK-NEXT: [[VEC:%.*]] = spv.Load "Input" [[ADDRESS]]
-      // CHECK-NEXT: {{%.*}} = spv.CompositeExtract [[VEC]]{{\[}}2 : i32{{\]}}
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
+      // CHECK: [[ADDRESS:%.*]] = spirv.mlir.addressof [[WORKGROUPID]]
+      // CHECK-NEXT: [[VEC:%.*]] = spirv.Load "Input" [[ADDRESS]]
+      // CHECK-NEXT: {{%.*}} = spirv.CompositeExtract [[VEC]]{{\[}}2 : i32{{\]}}
       %0 = gpu.block_id z
       gpu.return
     }
@@ -82,15 +82,15 @@ module attributes {gpu.container_module} {
     return
   }
 
-  // CHECK-LABEL:  spv.module @{{.*}} Logical GLSL450
+  // CHECK-LABEL:  spirv.module @{{.*}} Logical GLSL450
   gpu.module @kernels {
     gpu.func @builtin_workgroup_size_x() kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[32, 1, 1]>: vector<3xi32>>} {
-      // The constant value is obtained from the spv.entry_point_abi.
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[32, 1, 1]>: vector<3xi32>>} {
+      // The constant value is obtained from the spirv.entry_point_abi.
       // Note that this ignores the workgroup size specification in gpu.launch.
       // We may want to define gpu.workgroup_size and convert it to the entry
       // point ABI we want here.
-      // CHECK: spv.Constant 32 : i32
+      // CHECK: spirv.Constant 32 : i32
       %0 = gpu.block_dim x
       gpu.return
     }
@@ -107,12 +107,12 @@ module attributes {gpu.container_module} {
     return
   }
 
-  // CHECK-LABEL:  spv.module @{{.*}} Logical GLSL450
+  // CHECK-LABEL:  spirv.module @{{.*}} Logical GLSL450
   gpu.module @kernels {
     gpu.func @builtin_workgroup_size_y() kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[32, 4, 1]>: vector<3xi32>>} {
-      // The constant value is obtained from the spv.entry_point_abi.
-      // CHECK: spv.Constant 4 : i32
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[32, 4, 1]>: vector<3xi32>>} {
+      // The constant value is obtained from the spirv.entry_point_abi.
+      // CHECK: spirv.Constant 4 : i32
       %0 = gpu.block_dim y
       gpu.return
     }
@@ -129,12 +129,12 @@ module attributes {gpu.container_module} {
     return
   }
 
-  // CHECK-LABEL:  spv.module @{{.*}} Logical GLSL450
+  // CHECK-LABEL:  spirv.module @{{.*}} Logical GLSL450
   gpu.module @kernels {
     gpu.func @builtin_workgroup_size_z() kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[32, 4, 1]>: vector<3xi32>>} {
-      // The constant value is obtained from the spv.entry_point_abi.
-      // CHECK: spv.Constant 1 : i32
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[32, 4, 1]>: vector<3xi32>>} {
+      // The constant value is obtained from the spirv.entry_point_abi.
+      // CHECK: spirv.Constant 1 : i32
       %0 = gpu.block_dim z
       gpu.return
     }
@@ -151,14 +151,14 @@ module attributes {gpu.container_module} {
     return
   }
 
-  // CHECK-LABEL:  spv.module @{{.*}} Logical GLSL450
-  // CHECK: spv.GlobalVariable [[LOCALINVOCATIONID:@.*]] built_in("LocalInvocationId")
+  // CHECK-LABEL:  spirv.module @{{.*}} Logical GLSL450
+  // CHECK: spirv.GlobalVariable [[LOCALINVOCATIONID:@.*]] built_in("LocalInvocationId")
   gpu.module @kernels {
     gpu.func @builtin_local_id_x() kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
-      // CHECK: [[ADDRESS:%.*]] = spv.mlir.addressof [[LOCALINVOCATIONID]]
-      // CHECK-NEXT: [[VEC:%.*]] = spv.Load "Input" [[ADDRESS]]
-      // CHECK-NEXT: {{%.*}} = spv.CompositeExtract [[VEC]]{{\[}}0 : i32{{\]}}
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
+      // CHECK: [[ADDRESS:%.*]] = spirv.mlir.addressof [[LOCALINVOCATIONID]]
+      // CHECK-NEXT: [[VEC:%.*]] = spirv.Load "Input" [[ADDRESS]]
+      // CHECK-NEXT: {{%.*}} = spirv.CompositeExtract [[VEC]]{{\[}}0 : i32{{\]}}
       %0 = gpu.thread_id x
       gpu.return
     }
@@ -175,14 +175,14 @@ module attributes {gpu.container_module} {
     return
   }
 
-  // CHECK-LABEL:  spv.module @{{.*}} Logical GLSL450
-  // CHECK: spv.GlobalVariable [[NUMWORKGROUPS:@.*]] built_in("NumWorkgroups")
+  // CHECK-LABEL:  spirv.module @{{.*}} Logical GLSL450
+  // CHECK: spirv.GlobalVariable [[NUMWORKGROUPS:@.*]] built_in("NumWorkgroups")
   gpu.module @kernels {
     gpu.func @builtin_num_workgroups_x() kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
-      // CHECK: [[ADDRESS:%.*]] = spv.mlir.addressof [[NUMWORKGROUPS]]
-      // CHECK-NEXT: [[VEC:%.*]] = spv.Load "Input" [[ADDRESS]]
-      // CHECK-NEXT: {{%.*}} = spv.CompositeExtract [[VEC]]{{\[}}0 : i32{{\]}}
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
+      // CHECK: [[ADDRESS:%.*]] = spirv.mlir.addressof [[NUMWORKGROUPS]]
+      // CHECK-NEXT: [[VEC:%.*]] = spirv.Load "Input" [[ADDRESS]]
+      // CHECK-NEXT: {{%.*}} = spirv.CompositeExtract [[VEC]]{{\[}}0 : i32{{\]}}
       %0 = gpu.grid_dim x
       gpu.return
     }
@@ -192,13 +192,13 @@ module attributes {gpu.container_module} {
 // -----
 
 module attributes {gpu.container_module} {
-  // CHECK-LABEL:  spv.module @{{.*}} Logical GLSL450
-  // CHECK: spv.GlobalVariable [[SUBGROUPID:@.*]] built_in("SubgroupId")
+  // CHECK-LABEL:  spirv.module @{{.*}} Logical GLSL450
+  // CHECK: spirv.GlobalVariable [[SUBGROUPID:@.*]] built_in("SubgroupId")
   gpu.module @kernels {
     gpu.func @builtin_subgroup_id() kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
-      // CHECK: [[ADDRESS:%.*]] = spv.mlir.addressof [[SUBGROUPID]]
-      // CHECK-NEXT: {{%.*}} = spv.Load "Input" [[ADDRESS]]
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
+      // CHECK: [[ADDRESS:%.*]] = spirv.mlir.addressof [[SUBGROUPID]]
+      // CHECK-NEXT: {{%.*}} = spirv.Load "Input" [[ADDRESS]]
       %0 = gpu.subgroup_id : index
       gpu.return
     }
@@ -208,13 +208,13 @@ module attributes {gpu.container_module} {
 // -----
 
 module attributes {gpu.container_module} {
-  // CHECK-LABEL:  spv.module @{{.*}} Logical GLSL450
-  // CHECK: spv.GlobalVariable [[NUMSUBGROUPS:@.*]] built_in("NumSubgroups")
+  // CHECK-LABEL:  spirv.module @{{.*}} Logical GLSL450
+  // CHECK: spirv.GlobalVariable [[NUMSUBGROUPS:@.*]] built_in("NumSubgroups")
   gpu.module @kernels {
     gpu.func @builtin_num_subgroups() kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
-      // CHECK: [[ADDRESS:%.*]] = spv.mlir.addressof [[NUMSUBGROUPS]]
-      // CHECK-NEXT: {{%.*}} = spv.Load "Input" [[ADDRESS]]
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
+      // CHECK: [[ADDRESS:%.*]] = spirv.mlir.addressof [[NUMSUBGROUPS]]
+      // CHECK-NEXT: {{%.*}} = spirv.Load "Input" [[ADDRESS]]
       %0 = gpu.num_subgroups : index
       gpu.return
     }
@@ -231,14 +231,14 @@ module attributes {gpu.container_module} {
     return
   }
 
-  // CHECK-LABEL:  spv.module @{{.*}}
-  // CHECK: spv.GlobalVariable [[WORKGROUPSIZE:@.*]] built_in("WorkgroupSize")
+  // CHECK-LABEL:  spirv.module @{{.*}}
+  // CHECK: spirv.GlobalVariable [[WORKGROUPSIZE:@.*]] built_in("WorkgroupSize")
   gpu.module @kernels {
     gpu.func @builtin_workgroup_size_x() kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<>} {
-      // CHECK: [[ADDRESS:%.*]] = spv.mlir.addressof [[WORKGROUPSIZE]]
-      // CHECK-NEXT: [[VEC:%.*]] = spv.Load "Input" [[ADDRESS]]
-      // CHECK-NEXT: {{%.*}} = spv.CompositeExtract [[VEC]]{{\[}}0 : i32{{\]}}
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<>} {
+      // CHECK: [[ADDRESS:%.*]] = spirv.mlir.addressof [[WORKGROUPSIZE]]
+      // CHECK-NEXT: [[VEC:%.*]] = spirv.Load "Input" [[ADDRESS]]
+      // CHECK-NEXT: {{%.*}} = spirv.CompositeExtract [[VEC]]{{\[}}0 : i32{{\]}}
       %0 = gpu.block_dim x
       gpu.return
     }
@@ -255,14 +255,14 @@ module attributes {gpu.container_module} {
     return
   }
 
-  // CHECK-LABEL:  spv.module @{{.*}}
-  // CHECK: spv.GlobalVariable [[WORKGROUPSIZE:@.*]] built_in("WorkgroupSize")
+  // CHECK-LABEL:  spirv.module @{{.*}}
+  // CHECK: spirv.GlobalVariable [[WORKGROUPSIZE:@.*]] built_in("WorkgroupSize")
   gpu.module @kernels {
     gpu.func @builtin_workgroup_size_y() kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<>} {
-      // CHECK: [[ADDRESS:%.*]] = spv.mlir.addressof [[WORKGROUPSIZE]]
-      // CHECK-NEXT: [[VEC:%.*]] = spv.Load "Input" [[ADDRESS]]
-      // CHECK-NEXT: {{%.*}} = spv.CompositeExtract [[VEC]]{{\[}}1 : i32{{\]}}
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<>} {
+      // CHECK: [[ADDRESS:%.*]] = spirv.mlir.addressof [[WORKGROUPSIZE]]
+      // CHECK-NEXT: [[VEC:%.*]] = spirv.Load "Input" [[ADDRESS]]
+      // CHECK-NEXT: {{%.*}} = spirv.CompositeExtract [[VEC]]{{\[}}1 : i32{{\]}}
       %0 = gpu.block_dim y
       gpu.return
     }
@@ -279,14 +279,14 @@ module attributes {gpu.container_module} {
     return
   }
 
-  // CHECK-LABEL:  spv.module @{{.*}}
-  // CHECK: spv.GlobalVariable [[WORKGROUPSIZE:@.*]] built_in("WorkgroupSize")
+  // CHECK-LABEL:  spirv.module @{{.*}}
+  // CHECK: spirv.GlobalVariable [[WORKGROUPSIZE:@.*]] built_in("WorkgroupSize")
   gpu.module @kernels {
     gpu.func @builtin_workgroup_size_z() kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<>} {
-      // CHECK: [[ADDRESS:%.*]] = spv.mlir.addressof [[WORKGROUPSIZE]]
-      // CHECK-NEXT: [[VEC:%.*]] = spv.Load "Input" [[ADDRESS]]
-      // CHECK-NEXT: {{%.*}} = spv.CompositeExtract [[VEC]]{{\[}}2 : i32{{\]}}
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<>} {
+      // CHECK: [[ADDRESS:%.*]] = spirv.mlir.addressof [[WORKGROUPSIZE]]
+      // CHECK-NEXT: [[VEC:%.*]] = spirv.Load "Input" [[ADDRESS]]
+      // CHECK-NEXT: {{%.*}} = spirv.CompositeExtract [[VEC]]{{\[}}2 : i32{{\]}}
       %0 = gpu.block_dim z
       gpu.return
     }
@@ -303,14 +303,14 @@ module attributes {gpu.container_module} {
     return
   }
 
-  // CHECK-LABEL:  spv.module @{{.*}} Logical GLSL450
-  // CHECK: spv.GlobalVariable [[GLOBALINVOCATIONID:@.*]] built_in("GlobalInvocationId")
+  // CHECK-LABEL:  spirv.module @{{.*}} Logical GLSL450
+  // CHECK: spirv.GlobalVariable [[GLOBALINVOCATIONID:@.*]] built_in("GlobalInvocationId")
   gpu.module @kernels {
     gpu.func @builtin_global_id_x() kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
-      // CHECK: [[ADDRESS:%.*]] = spv.mlir.addressof [[GLOBALINVOCATIONID]]
-      // CHECK-NEXT: [[VEC:%.*]] = spv.Load "Input" [[ADDRESS]]
-      // CHECK-NEXT: {{%.*}} = spv.CompositeExtract [[VEC]]{{\[}}0 : i32{{\]}}
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
+      // CHECK: [[ADDRESS:%.*]] = spirv.mlir.addressof [[GLOBALINVOCATIONID]]
+      // CHECK-NEXT: [[VEC:%.*]] = spirv.Load "Input" [[ADDRESS]]
+      // CHECK-NEXT: {{%.*}} = spirv.CompositeExtract [[VEC]]{{\[}}0 : i32{{\]}}
       %0 = gpu.global_id x
       gpu.return
     }
@@ -327,14 +327,14 @@ module attributes {gpu.container_module} {
     return
   }
 
-  // CHECK-LABEL:  spv.module @{{.*}} Logical GLSL450
-  // CHECK: spv.GlobalVariable [[GLOBALINVOCATIONID:@.*]] built_in("GlobalInvocationId")
+  // CHECK-LABEL:  spirv.module @{{.*}} Logical GLSL450
+  // CHECK: spirv.GlobalVariable [[GLOBALINVOCATIONID:@.*]] built_in("GlobalInvocationId")
   gpu.module @kernels {
     gpu.func @builtin_global_id_y() kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
-      // CHECK: [[ADDRESS:%.*]] = spv.mlir.addressof [[GLOBALINVOCATIONID]]
-      // CHECK-NEXT: [[VEC:%.*]] = spv.Load "Input" [[ADDRESS]]
-      // CHECK-NEXT: {{%.*}} = spv.CompositeExtract [[VEC]]{{\[}}1 : i32{{\]}}
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
+      // CHECK: [[ADDRESS:%.*]] = spirv.mlir.addressof [[GLOBALINVOCATIONID]]
+      // CHECK-NEXT: [[VEC:%.*]] = spirv.Load "Input" [[ADDRESS]]
+      // CHECK-NEXT: {{%.*}} = spirv.CompositeExtract [[VEC]]{{\[}}1 : i32{{\]}}
       %0 = gpu.global_id y
       gpu.return
     }
@@ -351,14 +351,14 @@ module attributes {gpu.container_module} {
     return
   }
 
-  // CHECK-LABEL:  spv.module @{{.*}} Logical GLSL450
-  // CHECK: spv.GlobalVariable [[GLOBALINVOCATIONID:@.*]] built_in("GlobalInvocationId")
+  // CHECK-LABEL:  spirv.module @{{.*}} Logical GLSL450
+  // CHECK: spirv.GlobalVariable [[GLOBALINVOCATIONID:@.*]] built_in("GlobalInvocationId")
   gpu.module @kernels {
     gpu.func @builtin_global_id_z() kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
-      // CHECK: [[ADDRESS:%.*]] = spv.mlir.addressof [[GLOBALINVOCATIONID]]
-      // CHECK-NEXT: [[VEC:%.*]] = spv.Load "Input" [[ADDRESS]]
-      // CHECK-NEXT: {{%.*}} = spv.CompositeExtract [[VEC]]{{\[}}2 : i32{{\]}}
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
+      // CHECK: [[ADDRESS:%.*]] = spirv.mlir.addressof [[GLOBALINVOCATIONID]]
+      // CHECK-NEXT: [[VEC:%.*]] = spirv.Load "Input" [[ADDRESS]]
+      // CHECK-NEXT: {{%.*}} = spirv.CompositeExtract [[VEC]]{{\[}}2 : i32{{\]}}
       %0 = gpu.global_id z
       gpu.return
     }
@@ -369,13 +369,13 @@ module attributes {gpu.container_module} {
 // -----
 
 module attributes {gpu.container_module} {
-  // CHECK-LABEL:  spv.module @{{.*}} Logical GLSL450
-  // CHECK: spv.GlobalVariable [[SUBGROUPSIZE:@.*]] built_in("SubgroupSize")
+  // CHECK-LABEL:  spirv.module @{{.*}} Logical GLSL450
+  // CHECK: spirv.GlobalVariable [[SUBGROUPSIZE:@.*]] built_in("SubgroupSize")
   gpu.module @kernels {
     gpu.func @builtin_subgroup_size() kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
-      // CHECK: [[ADDRESS:%.*]] = spv.mlir.addressof [[SUBGROUPSIZE]]
-      // CHECK-NEXT: {{%.*}} = spv.Load "Input" [[ADDRESS]]
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
+      // CHECK: [[ADDRESS:%.*]] = spirv.mlir.addressof [[SUBGROUPSIZE]]
+      // CHECK-NEXT: {{%.*}} = spirv.Load "Input" [[ADDRESS]]
       %0 = gpu.subgroup_size : index
       gpu.return
     }

diff  --git a/mlir/test/Conversion/GPUToSPIRV/entry-point.mlir b/mlir/test/Conversion/GPUToSPIRV/entry-point.mlir
index 2293f5a0aec7..8536b2f2ea5b 100644
--- a/mlir/test/Conversion/GPUToSPIRV/entry-point.mlir
+++ b/mlir/test/Conversion/GPUToSPIRV/entry-point.mlir
@@ -2,10 +2,10 @@
 // RUN: mlir-opt -test-spirv-entry-point-abi="workgroup-size=32" %s | FileCheck %s -check-prefix=WG32
 
 //      DEFAULT: gpu.func @foo()
-// DEFAULT-SAME: spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<1> : vector<3xi32>>
+// DEFAULT-SAME: spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<1> : vector<3xi32>>
 
 //      WG32: gpu.func @foo()
-// WG32-SAME:  spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[32, 1, 1]> : vector<3xi32>>
+// WG32-SAME:  spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[32, 1, 1]> : vector<3xi32>>
 
 gpu.module @kernels {
   gpu.func @foo() kernel {

diff  --git a/mlir/test/Conversion/GPUToSPIRV/gpu-to-spirv.mlir b/mlir/test/Conversion/GPUToSPIRV/gpu-to-spirv.mlir
index 00a97fc1a990..a8238298bc79 100644
--- a/mlir/test/Conversion/GPUToSPIRV/gpu-to-spirv.mlir
+++ b/mlir/test/Conversion/GPUToSPIRV/gpu-to-spirv.mlir
@@ -2,25 +2,25 @@
 
 module attributes {gpu.container_module} {
   gpu.module @kernels {
-    // CHECK:       spv.module @{{.*}} Logical GLSL450 {
-    // CHECK-LABEL: spv.func @basic_module_structure
-    // CHECK-SAME: {{%.*}}: f32 {spv.interface_var_abi = #spv.interface_var_abi<(0, 0), StorageBuffer>}
-    // CHECK-SAME: {{%.*}}: !spv.ptr<!spv.struct<(!spv.array<12 x f32, stride=4> [0])>, StorageBuffer> {spv.interface_var_abi = #spv.interface_var_abi<(0, 1)>}
-    // CHECK-SAME: spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[32, 4, 1]> : vector<3xi32>>
-    gpu.func @basic_module_structure(%arg0 : f32, %arg1 : memref<12xf32, #spv.storage_class<StorageBuffer>>) kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[32, 4, 1]>: vector<3xi32>>} {
-      // CHECK: spv.Return
+    // CHECK:       spirv.module @{{.*}} Logical GLSL450 {
+    // CHECK-LABEL: spirv.func @basic_module_structure
+    // CHECK-SAME: {{%.*}}: f32 {spirv.interface_var_abi = #spirv.interface_var_abi<(0, 0), StorageBuffer>}
+    // CHECK-SAME: {{%.*}}: !spirv.ptr<!spirv.struct<(!spirv.array<12 x f32, stride=4> [0])>, StorageBuffer> {spirv.interface_var_abi = #spirv.interface_var_abi<(0, 1)>}
+    // CHECK-SAME: spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[32, 4, 1]> : vector<3xi32>>
+    gpu.func @basic_module_structure(%arg0 : f32, %arg1 : memref<12xf32, #spirv.storage_class<StorageBuffer>>) kernel
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[32, 4, 1]>: vector<3xi32>>} {
+      // CHECK: spirv.Return
       gpu.return
     }
   }
 
   func.func @main() {
     %0 = "op"() : () -> (f32)
-    %1 = "op"() : () -> (memref<12xf32, #spv.storage_class<StorageBuffer>>)
+    %1 = "op"() : () -> (memref<12xf32, #spirv.storage_class<StorageBuffer>>)
     %cst = arith.constant 1 : index
     gpu.launch_func @kernels::@basic_module_structure
         blocks in (%cst, %cst, %cst) threads in (%cst, %cst, %cst)
-        args(%0 : f32, %1 : memref<12xf32, #spv.storage_class<StorageBuffer>>)
+        args(%0 : f32, %1 : memref<12xf32, #spirv.storage_class<StorageBuffer>>)
     return
   }
 }
@@ -29,21 +29,21 @@ module attributes {gpu.container_module} {
 
 module attributes {gpu.container_module} {
   gpu.module @kernels {
-    // CHECK:       spv.module @{{.*}} Logical GLSL450 {
-    // CHECK-LABEL: spv.func @basic_module_structure_preset_ABI
+    // CHECK:       spirv.module @{{.*}} Logical GLSL450 {
+    // CHECK-LABEL: spirv.func @basic_module_structure_preset_ABI
     // CHECK-SAME: {{%[a-zA-Z0-9_]*}}: f32
-    // CHECK-SAME: spv.interface_var_abi = #spv.interface_var_abi<(1, 2), StorageBuffer>
-    // CHECK-SAME: !spv.ptr<!spv.struct<(!spv.array<12 x f32, stride=4> [0])>, StorageBuffer>
-    // CHECK-SAME: spv.interface_var_abi = #spv.interface_var_abi<(3, 0)>
-    // CHECK-SAME: spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[32, 4, 1]> : vector<3xi32>>
+    // CHECK-SAME: spirv.interface_var_abi = #spirv.interface_var_abi<(1, 2), StorageBuffer>
+    // CHECK-SAME: !spirv.ptr<!spirv.struct<(!spirv.array<12 x f32, stride=4> [0])>, StorageBuffer>
+    // CHECK-SAME: spirv.interface_var_abi = #spirv.interface_var_abi<(3, 0)>
+    // CHECK-SAME: spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[32, 4, 1]> : vector<3xi32>>
     gpu.func @basic_module_structure_preset_ABI(
       %arg0 : f32
-        {spv.interface_var_abi = #spv.interface_var_abi<(1, 2), StorageBuffer>},
-      %arg1 : memref<12xf32, #spv.storage_class<StorageBuffer>>
-        {spv.interface_var_abi = #spv.interface_var_abi<(3, 0)>}) kernel
+        {spirv.interface_var_abi = #spirv.interface_var_abi<(1, 2), StorageBuffer>},
+      %arg1 : memref<12xf32, #spirv.storage_class<StorageBuffer>>
+        {spirv.interface_var_abi = #spirv.interface_var_abi<(3, 0)>}) kernel
       attributes
-        {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[32, 4, 1]>: vector<3xi32>>} {
-      // CHECK: spv.Return
+        {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[32, 4, 1]>: vector<3xi32>>} {
+      // CHECK: spirv.Return
       gpu.return
     }
   }
@@ -54,19 +54,19 @@ module attributes {gpu.container_module} {
 module attributes {gpu.container_module} {
   gpu.module @kernels {
     // expected-error @below {{failed to legalize operation 'gpu.func'}}
-    // expected-remark @below {{match failure: missing 'spv.entry_point_abi' attribute}}
-    gpu.func @missing_entry_point_abi(%arg0 : f32, %arg1 : memref<12xf32, #spv.storage_class<StorageBuffer>>) kernel {
+    // expected-remark @below {{match failure: missing 'spirv.entry_point_abi' attribute}}
+    gpu.func @missing_entry_point_abi(%arg0 : f32, %arg1 : memref<12xf32, #spirv.storage_class<StorageBuffer>>) kernel {
       gpu.return
     }
   }
 
   func.func @main() {
     %0 = "op"() : () -> (f32)
-    %1 = "op"() : () -> (memref<12xf32, #spv.storage_class<StorageBuffer>>)
+    %1 = "op"() : () -> (memref<12xf32, #spirv.storage_class<StorageBuffer>>)
     %cst = arith.constant 1 : index
     gpu.launch_func @kernels::@missing_entry_point_abi
         blocks in (%cst, %cst, %cst) threads in (%cst, %cst, %cst)
-        args(%0 : f32, %1 : memref<12xf32, #spv.storage_class<StorageBuffer>>)
+        args(%0 : f32, %1 : memref<12xf32, #spirv.storage_class<StorageBuffer>>)
     return
   }
 }
@@ -76,13 +76,13 @@ module attributes {gpu.container_module} {
 module attributes {gpu.container_module} {
   gpu.module @kernels {
     // expected-error @below {{failed to legalize operation 'gpu.func'}}
-    // expected-remark @below {{match failure: missing 'spv.interface_var_abi' attribute at argument 1}}
+    // expected-remark @below {{match failure: missing 'spirv.interface_var_abi' attribute at argument 1}}
     gpu.func @missing_entry_point_abi(
       %arg0 : f32
-        {spv.interface_var_abi = #spv.interface_var_abi<(1, 2), StorageBuffer>},
-      %arg1 : memref<12xf32, #spv.storage_class<StorageBuffer>>) kernel
+        {spirv.interface_var_abi = #spirv.interface_var_abi<(1, 2), StorageBuffer>},
+      %arg1 : memref<12xf32, #spirv.storage_class<StorageBuffer>>) kernel
     attributes
-      {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[32, 4, 1]>: vector<3xi32>>} {
+      {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[32, 4, 1]>: vector<3xi32>>} {
       gpu.return
     }
   }
@@ -93,13 +93,13 @@ module attributes {gpu.container_module} {
 module attributes {gpu.container_module} {
   gpu.module @kernels {
     // expected-error @below {{failed to legalize operation 'gpu.func'}}
-    // expected-remark @below {{match failure: missing 'spv.interface_var_abi' attribute at argument 0}}
+    // expected-remark @below {{match failure: missing 'spirv.interface_var_abi' attribute at argument 0}}
     gpu.func @missing_entry_point_abi(
       %arg0 : f32,
-      %arg1 : memref<12xf32, #spv.storage_class<StorageBuffer>>
-        {spv.interface_var_abi = #spv.interface_var_abi<(3, 0)>}) kernel
+      %arg1 : memref<12xf32, #spirv.storage_class<StorageBuffer>>
+        {spirv.interface_var_abi = #spirv.interface_var_abi<(3, 0)>}) kernel
     attributes
-      {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[32, 4, 1]>: vector<3xi32>>} {
+      {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[32, 4, 1]>: vector<3xi32>>} {
       gpu.return
     }
   }
@@ -109,10 +109,10 @@ module attributes {gpu.container_module} {
 
 module attributes {gpu.container_module} {
   gpu.module @kernels {
-    // CHECK-LABEL: spv.func @barrier
-    gpu.func @barrier(%arg0 : f32, %arg1 : memref<12xf32, #spv.storage_class<StorageBuffer>>) kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[32, 4, 1]>: vector<3xi32>>} {
-      // CHECK: spv.ControlBarrier <Workgroup>, <Workgroup>, <AcquireRelease|WorkgroupMemory>
+    // CHECK-LABEL: spirv.func @barrier
+    gpu.func @barrier(%arg0 : f32, %arg1 : memref<12xf32, #spirv.storage_class<StorageBuffer>>) kernel
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[32, 4, 1]>: vector<3xi32>>} {
+      // CHECK: spirv.ControlBarrier <Workgroup>, <Workgroup>, <AcquireRelease|WorkgroupMemory>
       gpu.barrier
       gpu.return
     }
@@ -120,11 +120,11 @@ module attributes {gpu.container_module} {
 
   func.func @main() {
     %0 = "op"() : () -> (f32)
-    %1 = "op"() : () -> (memref<12xf32, #spv.storage_class<StorageBuffer>>)
+    %1 = "op"() : () -> (memref<12xf32, #spirv.storage_class<StorageBuffer>>)
     %cst = arith.constant 1 : index
     gpu.launch_func @kernels::@barrier
         blocks in (%cst, %cst, %cst) threads in (%cst, %cst, %cst)
-        args(%0 : f32, %1 : memref<12xf32, #spv.storage_class<StorageBuffer>>)
+        args(%0 : f32, %1 : memref<12xf32, #spirv.storage_class<StorageBuffer>>)
     return
   }
 }

diff  --git a/mlir/test/Conversion/GPUToSPIRV/load-store.mlir b/mlir/test/Conversion/GPUToSPIRV/load-store.mlir
index abce5d7542d7..07fae0c20e07 100644
--- a/mlir/test/Conversion/GPUToSPIRV/load-store.mlir
+++ b/mlir/test/Conversion/GPUToSPIRV/load-store.mlir
@@ -2,10 +2,10 @@
 
 module attributes {
   gpu.container_module,
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
 } {
-  func.func @load_store(%arg0: memref<12x4xf32, #spv.storage_class<StorageBuffer>>, %arg1: memref<12x4xf32, #spv.storage_class<StorageBuffer>>, %arg2: memref<12x4xf32, #spv.storage_class<StorageBuffer>>) {
+  func.func @load_store(%arg0: memref<12x4xf32, #spirv.storage_class<StorageBuffer>>, %arg1: memref<12x4xf32, #spirv.storage_class<StorageBuffer>>, %arg2: memref<12x4xf32, #spirv.storage_class<StorageBuffer>>) {
     %c0 = arith.constant 0 : index
     %c12 = arith.constant 12 : index
     %0 = arith.subi %c12, %c0 : index
@@ -17,32 +17,32 @@ module attributes {
     %c1_2 = arith.constant 1 : index
     gpu.launch_func @kernels::@load_store_kernel
         blocks in (%0, %c1_2, %c1_2) threads in (%1, %c1_2, %c1_2)
-        args(%arg0 : memref<12x4xf32, #spv.storage_class<StorageBuffer>>, %arg1 : memref<12x4xf32, #spv.storage_class<StorageBuffer>>, %arg2 : memref<12x4xf32, #spv.storage_class<StorageBuffer>>,
+        args(%arg0 : memref<12x4xf32, #spirv.storage_class<StorageBuffer>>, %arg1 : memref<12x4xf32, #spirv.storage_class<StorageBuffer>>, %arg2 : memref<12x4xf32, #spirv.storage_class<StorageBuffer>>,
              %c0 : index, %c0_0 : index, %c1 : index, %c1_1 : index)
     return
   }
 
-  // CHECK-LABEL: spv.module @{{.*}} Logical GLSL450
+  // CHECK-LABEL: spirv.module @{{.*}} Logical GLSL450
   gpu.module @kernels {
-    // CHECK-DAG: spv.GlobalVariable @[[NUMWORKGROUPSVAR:.*]] built_in("NumWorkgroups") : !spv.ptr<vector<3xi32>, Input>
-    // CHECK-DAG: spv.GlobalVariable @[[$LOCALINVOCATIONIDVAR:.*]] built_in("LocalInvocationId") : !spv.ptr<vector<3xi32>, Input>
-    // CHECK-DAG: spv.GlobalVariable @[[$WORKGROUPIDVAR:.*]] built_in("WorkgroupId") : !spv.ptr<vector<3xi32>, Input>
-    // CHECK-LABEL:    spv.func @load_store_kernel
-    // CHECK-SAME: %[[ARG0:.*]]: !spv.ptr<!spv.struct<(!spv.array<48 x f32, stride=4> [0])>, StorageBuffer> {spv.interface_var_abi = #spv.interface_var_abi<(0, 0)>}
-    // CHECK-SAME: %[[ARG1:.*]]: !spv.ptr<!spv.struct<(!spv.array<48 x f32, stride=4> [0])>, StorageBuffer> {spv.interface_var_abi = #spv.interface_var_abi<(0, 1)>}
-    // CHECK-SAME: %[[ARG2:.*]]: !spv.ptr<!spv.struct<(!spv.array<48 x f32, stride=4> [0])>, StorageBuffer> {spv.interface_var_abi = #spv.interface_var_abi<(0, 2)>}
-    // CHECK-SAME: %[[ARG3:.*]]: i32 {spv.interface_var_abi = #spv.interface_var_abi<(0, 3), StorageBuffer>}
-    // CHECK-SAME: %[[ARG4:.*]]: i32 {spv.interface_var_abi = #spv.interface_var_abi<(0, 4), StorageBuffer>}
-    // CHECK-SAME: %[[ARG5:.*]]: i32 {spv.interface_var_abi = #spv.interface_var_abi<(0, 5), StorageBuffer>}
-    // CHECK-SAME: %[[ARG6:.*]]: i32 {spv.interface_var_abi = #spv.interface_var_abi<(0, 6), StorageBuffer>}
-    gpu.func @load_store_kernel(%arg0: memref<12x4xf32, #spv.storage_class<StorageBuffer>>, %arg1: memref<12x4xf32, #spv.storage_class<StorageBuffer>>, %arg2: memref<12x4xf32, #spv.storage_class<StorageBuffer>>, %arg3: index, %arg4: index, %arg5: index, %arg6: index) kernel
-      attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
-      // CHECK: %[[ADDRESSWORKGROUPID:.*]] = spv.mlir.addressof @[[$WORKGROUPIDVAR]]
-      // CHECK: %[[WORKGROUPID:.*]] = spv.Load "Input" %[[ADDRESSWORKGROUPID]]
-      // CHECK: %[[WORKGROUPIDX:.*]] = spv.CompositeExtract %[[WORKGROUPID]]{{\[}}0 : i32{{\]}}
-      // CHECK: %[[ADDRESSLOCALINVOCATIONID:.*]] = spv.mlir.addressof @[[$LOCALINVOCATIONIDVAR]]
-      // CHECK: %[[LOCALINVOCATIONID:.*]] = spv.Load "Input" %[[ADDRESSLOCALINVOCATIONID]]
-      // CHECK: %[[LOCALINVOCATIONIDX:.*]] = spv.CompositeExtract %[[LOCALINVOCATIONID]]{{\[}}0 : i32{{\]}}
+    // CHECK-DAG: spirv.GlobalVariable @[[NUMWORKGROUPSVAR:.*]] built_in("NumWorkgroups") : !spirv.ptr<vector<3xi32>, Input>
+    // CHECK-DAG: spirv.GlobalVariable @[[$LOCALINVOCATIONIDVAR:.*]] built_in("LocalInvocationId") : !spirv.ptr<vector<3xi32>, Input>
+    // CHECK-DAG: spirv.GlobalVariable @[[$WORKGROUPIDVAR:.*]] built_in("WorkgroupId") : !spirv.ptr<vector<3xi32>, Input>
+    // CHECK-LABEL:    spirv.func @load_store_kernel
+    // CHECK-SAME: %[[ARG0:.*]]: !spirv.ptr<!spirv.struct<(!spirv.array<48 x f32, stride=4> [0])>, StorageBuffer> {spirv.interface_var_abi = #spirv.interface_var_abi<(0, 0)>}
+    // CHECK-SAME: %[[ARG1:.*]]: !spirv.ptr<!spirv.struct<(!spirv.array<48 x f32, stride=4> [0])>, StorageBuffer> {spirv.interface_var_abi = #spirv.interface_var_abi<(0, 1)>}
+    // CHECK-SAME: %[[ARG2:.*]]: !spirv.ptr<!spirv.struct<(!spirv.array<48 x f32, stride=4> [0])>, StorageBuffer> {spirv.interface_var_abi = #spirv.interface_var_abi<(0, 2)>}
+    // CHECK-SAME: %[[ARG3:.*]]: i32 {spirv.interface_var_abi = #spirv.interface_var_abi<(0, 3), StorageBuffer>}
+    // CHECK-SAME: %[[ARG4:.*]]: i32 {spirv.interface_var_abi = #spirv.interface_var_abi<(0, 4), StorageBuffer>}
+    // CHECK-SAME: %[[ARG5:.*]]: i32 {spirv.interface_var_abi = #spirv.interface_var_abi<(0, 5), StorageBuffer>}
+    // CHECK-SAME: %[[ARG6:.*]]: i32 {spirv.interface_var_abi = #spirv.interface_var_abi<(0, 6), StorageBuffer>}
+    gpu.func @load_store_kernel(%arg0: memref<12x4xf32, #spirv.storage_class<StorageBuffer>>, %arg1: memref<12x4xf32, #spirv.storage_class<StorageBuffer>>, %arg2: memref<12x4xf32, #spirv.storage_class<StorageBuffer>>, %arg3: index, %arg4: index, %arg5: index, %arg6: index) kernel
+      attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
+      // CHECK: %[[ADDRESSWORKGROUPID:.*]] = spirv.mlir.addressof @[[$WORKGROUPIDVAR]]
+      // CHECK: %[[WORKGROUPID:.*]] = spirv.Load "Input" %[[ADDRESSWORKGROUPID]]
+      // CHECK: %[[WORKGROUPIDX:.*]] = spirv.CompositeExtract %[[WORKGROUPID]]{{\[}}0 : i32{{\]}}
+      // CHECK: %[[ADDRESSLOCALINVOCATIONID:.*]] = spirv.mlir.addressof @[[$LOCALINVOCATIONIDVAR]]
+      // CHECK: %[[LOCALINVOCATIONID:.*]] = spirv.Load "Input" %[[ADDRESSLOCALINVOCATIONID]]
+      // CHECK: %[[LOCALINVOCATIONIDX:.*]] = spirv.CompositeExtract %[[LOCALINVOCATIONID]]{{\[}}0 : i32{{\]}}
       %0 = gpu.block_id x
       %1 = gpu.block_id y
       %2 = gpu.block_id z
@@ -55,29 +55,29 @@ module attributes {
       %9 = gpu.block_dim x
       %10 = gpu.block_dim y
       %11 = gpu.block_dim z
-      // CHECK: %[[INDEX1:.*]] = spv.IAdd %[[ARG3]], %[[WORKGROUPIDX]]
+      // CHECK: %[[INDEX1:.*]] = spirv.IAdd %[[ARG3]], %[[WORKGROUPIDX]]
       %12 = arith.addi %arg3, %0 : index
-      // CHECK: %[[INDEX2:.*]] = spv.IAdd %[[ARG4]], %[[LOCALINVOCATIONIDX]]
+      // CHECK: %[[INDEX2:.*]] = spirv.IAdd %[[ARG4]], %[[LOCALINVOCATIONIDX]]
       %13 = arith.addi %arg4, %3 : index
-      // CHECK: %[[ZERO:.*]] = spv.Constant 0 : i32
-      // CHECK: %[[OFFSET1_0:.*]] = spv.Constant 0 : i32
-      // CHECK: %[[STRIDE1_1:.*]] = spv.Constant 4 : i32
-      // CHECK: %[[UPDATE1_1:.*]] = spv.IMul %[[STRIDE1_1]], %[[INDEX1]] : i32
-      // CHECK: %[[OFFSET1_1:.*]] = spv.IAdd %[[OFFSET1_0]], %[[UPDATE1_1]] : i32
-      // CHECK: %[[STRIDE1_2:.*]] = spv.Constant 1 : i32
-      // CHECK: %[[UPDATE1_2:.*]] = spv.IMul %[[STRIDE1_2]], %[[INDEX2]] : i32
-      // CHECK: %[[OFFSET1_2:.*]] = spv.IAdd %[[OFFSET1_1]], %[[UPDATE1_2]] : i32
-      // CHECK: %[[PTR1:.*]] = spv.AccessChain %[[ARG0]]{{\[}}%[[ZERO]], %[[OFFSET1_2]]{{\]}}
-      // CHECK-NEXT: %[[VAL1:.*]] = spv.Load "StorageBuffer" %[[PTR1]]
-      %14 = memref.load %arg0[%12, %13] : memref<12x4xf32, #spv.storage_class<StorageBuffer>>
-      // CHECK: %[[PTR2:.*]] = spv.AccessChain %[[ARG1]]{{\[}}{{%.*}}, {{%.*}}{{\]}}
-      // CHECK-NEXT: %[[VAL2:.*]] = spv.Load "StorageBuffer" %[[PTR2]]
-      %15 = memref.load %arg1[%12, %13] : memref<12x4xf32, #spv.storage_class<StorageBuffer>>
-      // CHECK: %[[VAL3:.*]] = spv.FAdd %[[VAL1]], %[[VAL2]]
+      // CHECK: %[[ZERO:.*]] = spirv.Constant 0 : i32
+      // CHECK: %[[OFFSET1_0:.*]] = spirv.Constant 0 : i32
+      // CHECK: %[[STRIDE1_1:.*]] = spirv.Constant 4 : i32
+      // CHECK: %[[UPDATE1_1:.*]] = spirv.IMul %[[STRIDE1_1]], %[[INDEX1]] : i32
+      // CHECK: %[[OFFSET1_1:.*]] = spirv.IAdd %[[OFFSET1_0]], %[[UPDATE1_1]] : i32
+      // CHECK: %[[STRIDE1_2:.*]] = spirv.Constant 1 : i32
+      // CHECK: %[[UPDATE1_2:.*]] = spirv.IMul %[[STRIDE1_2]], %[[INDEX2]] : i32
+      // CHECK: %[[OFFSET1_2:.*]] = spirv.IAdd %[[OFFSET1_1]], %[[UPDATE1_2]] : i32
+      // CHECK: %[[PTR1:.*]] = spirv.AccessChain %[[ARG0]]{{\[}}%[[ZERO]], %[[OFFSET1_2]]{{\]}}
+      // CHECK-NEXT: %[[VAL1:.*]] = spirv.Load "StorageBuffer" %[[PTR1]]
+      %14 = memref.load %arg0[%12, %13] : memref<12x4xf32, #spirv.storage_class<StorageBuffer>>
+      // CHECK: %[[PTR2:.*]] = spirv.AccessChain %[[ARG1]]{{\[}}{{%.*}}, {{%.*}}{{\]}}
+      // CHECK-NEXT: %[[VAL2:.*]] = spirv.Load "StorageBuffer" %[[PTR2]]
+      %15 = memref.load %arg1[%12, %13] : memref<12x4xf32, #spirv.storage_class<StorageBuffer>>
+      // CHECK: %[[VAL3:.*]] = spirv.FAdd %[[VAL1]], %[[VAL2]]
       %16 = arith.addf %14, %15 : f32
-      // CHECK: %[[PTR3:.*]] = spv.AccessChain %[[ARG2]]{{\[}}{{%.*}}, {{%.*}}{{\]}}
-      // CHECK-NEXT: spv.Store "StorageBuffer" %[[PTR3]], %[[VAL3]]
-      memref.store %16, %arg2[%12, %13] : memref<12x4xf32, #spv.storage_class<StorageBuffer>>
+      // CHECK: %[[PTR3:.*]] = spirv.AccessChain %[[ARG2]]{{\[}}{{%.*}}, {{%.*}}{{\]}}
+      // CHECK-NEXT: spirv.Store "StorageBuffer" %[[PTR3]], %[[VAL3]]
+      memref.store %16, %arg2[%12, %13] : memref<12x4xf32, #spirv.storage_class<StorageBuffer>>
       gpu.return
     }
   }

diff  --git a/mlir/test/Conversion/GPUToSPIRV/module-opencl.mlir b/mlir/test/Conversion/GPUToSPIRV/module-opencl.mlir
index 5413157a1a1e..35cd5ea26b26 100644
--- a/mlir/test/Conversion/GPUToSPIRV/module-opencl.mlir
+++ b/mlir/test/Conversion/GPUToSPIRV/module-opencl.mlir
@@ -2,29 +2,29 @@
 
 module attributes {
   gpu.container_module,
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [Kernel, Addresses], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Kernel, Addresses], []>, #spirv.resource_limits<>>
 } {
   gpu.module @kernels {
-    // CHECK-LABEL: spv.module @{{.*}} Physical64 OpenCL
-    //       CHECK:   spv.func
+    // CHECK-LABEL: spirv.module @{{.*}} Physical64 OpenCL
+    //       CHECK:   spirv.func
     //  CHECK-SAME:     {{%.*}}: f32
-    //   CHECK-NOT:     spv.interface_var_abi
-    //  CHECK-SAME:     {{%.*}}: !spv.ptr<f32, CrossWorkgroup>
-    //   CHECK-NOT:     spv.interface_var_abi
-    //  CHECK-SAME:     spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[32, 4, 1]> : vector<3xi32>>
-    gpu.func @basic_module_structure(%arg0 : f32, %arg1 : memref<12xf32, #spv.storage_class<CrossWorkgroup>>) kernel
-        attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[32, 4, 1]>: vector<3xi32>>} {
+    //   CHECK-NOT:     spirv.interface_var_abi
+    //  CHECK-SAME:     {{%.*}}: !spirv.ptr<f32, CrossWorkgroup>
+    //   CHECK-NOT:     spirv.interface_var_abi
+    //  CHECK-SAME:     spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[32, 4, 1]> : vector<3xi32>>
+    gpu.func @basic_module_structure(%arg0 : f32, %arg1 : memref<12xf32, #spirv.storage_class<CrossWorkgroup>>) kernel
+        attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[32, 4, 1]>: vector<3xi32>>} {
       gpu.return
     }
   }
 
   func.func @main() {
     %0 = "op"() : () -> (f32)
-    %1 = "op"() : () -> (memref<12xf32, #spv.storage_class<CrossWorkgroup>>)
+    %1 = "op"() : () -> (memref<12xf32, #spirv.storage_class<CrossWorkgroup>>)
     %cst = arith.constant 1 : index
     gpu.launch_func @kernels::@basic_module_structure
         blocks in (%cst, %cst, %cst) threads in (%cst, %cst, %cst)
-        args(%0 : f32, %1 : memref<12xf32, #spv.storage_class<CrossWorkgroup>>)
+        args(%0 : f32, %1 : memref<12xf32, #spirv.storage_class<CrossWorkgroup>>)
     return
   }
 }

diff  --git a/mlir/test/Conversion/GPUToSPIRV/shuffle.mlir b/mlir/test/Conversion/GPUToSPIRV/shuffle.mlir
index 6a7b38cd32ab..718539626c29 100644
--- a/mlir/test/Conversion/GPUToSPIRV/shuffle.mlir
+++ b/mlir/test/Conversion/GPUToSPIRV/shuffle.mlir
@@ -2,21 +2,21 @@
 
 module attributes {
   gpu.container_module,
-  spv.target_env = #spv.target_env<#spv.vce<v1.4, [Shader, GroupNonUniformShuffle], []>, #spv.resource_limits<subgroup_size = 16>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.4, [Shader, GroupNonUniformShuffle], []>, #spirv.resource_limits<subgroup_size = 16>>
 } {
 
 gpu.module @kernels {
-  // CHECK-LABEL:  spv.func @shuffle_xor()
+  // CHECK-LABEL:  spirv.func @shuffle_xor()
   gpu.func @shuffle_xor() kernel
-    attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
+    attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
     %mask = arith.constant 8 : i32
     %width = arith.constant 16 : i32
     %val = arith.constant 42.0 : f32
 
-    // CHECK: %[[MASK:.+]] = spv.Constant 8 : i32
-    // CHECK: %[[VAL:.+]] = spv.Constant 4.200000e+01 : f32
-    // CHECK: %{{.+}} = spv.Constant true
-    // CHECK: %{{.+}} = spv.GroupNonUniformShuffleXor <Subgroup> %[[VAL]], %[[MASK]] : f32, i32
+    // CHECK: %[[MASK:.+]] = spirv.Constant 8 : i32
+    // CHECK: %[[VAL:.+]] = spirv.Constant 4.200000e+01 : f32
+    // CHECK: %{{.+}} = spirv.Constant true
+    // CHECK: %{{.+}} = spirv.GroupNonUniformShuffleXor <Subgroup> %[[VAL]], %[[MASK]] : f32, i32
     %result, %valid = gpu.shuffle xor %val, %mask, %width : f32
     gpu.return
   }
@@ -28,12 +28,12 @@ gpu.module @kernels {
 
 module attributes {
   gpu.container_module,
-  spv.target_env = #spv.target_env<#spv.vce<v1.4, [Shader, GroupNonUniformShuffle], []>, #spv.resource_limits<subgroup_size = 32>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.4, [Shader, GroupNonUniformShuffle], []>, #spirv.resource_limits<subgroup_size = 32>>
 } {
 
 gpu.module @kernels {
   gpu.func @shuffle_xor() kernel
-    attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
+    attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>} {
     %mask = arith.constant 8 : i32
     %width = arith.constant 16 : i32
     %val = arith.constant 42.0 : f32

diff  --git a/mlir/test/Conversion/GPUToVulkan/lower-gpu-launch-vulkan-launch.mlir b/mlir/test/Conversion/GPUToVulkan/lower-gpu-launch-vulkan-launch.mlir
index 8056074cf3b0..3b176c3cf834 100644
--- a/mlir/test/Conversion/GPUToVulkan/lower-gpu-launch-vulkan-launch.mlir
+++ b/mlir/test/Conversion/GPUToVulkan/lower-gpu-launch-vulkan-launch.mlir
@@ -5,18 +5,18 @@
 // CHECK: call @vulkanLaunch(%[[index]], %[[index]], %[[index]], %[[resource]]) {spirv_blob = "{{.*}}", spirv_entry_point = "kernel"}
 
 module attributes {gpu.container_module} {
-  spv.module Logical GLSL450 requires #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]> {
-    spv.GlobalVariable @kernel_arg_0 bind(0, 0) : !spv.ptr<!spv.struct<(!spv.array<12 x f32, stride=4> [0])>, StorageBuffer>
-    spv.func @kernel() "None" attributes {workgroup_attributions = 0 : i64} {
-      %0 = spv.mlir.addressof @kernel_arg_0 : !spv.ptr<!spv.struct<(!spv.array<12 x f32, stride=4> [0])>, StorageBuffer>
-      %2 = spv.Constant 0 : i32
-      %3 = spv.mlir.addressof @kernel_arg_0 : !spv.ptr<!spv.struct<(!spv.array<12 x f32, stride=4> [0])>, StorageBuffer>
-      %4 = spv.AccessChain %0[%2, %2] : !spv.ptr<!spv.struct<(!spv.array<12 x f32, stride=4> [0])>, StorageBuffer>, i32, i32
-      %5 = spv.Load "StorageBuffer" %4 : f32
-      spv.Return
+  spirv.module Logical GLSL450 requires #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]> {
+    spirv.GlobalVariable @kernel_arg_0 bind(0, 0) : !spirv.ptr<!spirv.struct<(!spirv.array<12 x f32, stride=4> [0])>, StorageBuffer>
+    spirv.func @kernel() "None" attributes {workgroup_attributions = 0 : i64} {
+      %0 = spirv.mlir.addressof @kernel_arg_0 : !spirv.ptr<!spirv.struct<(!spirv.array<12 x f32, stride=4> [0])>, StorageBuffer>
+      %2 = spirv.Constant 0 : i32
+      %3 = spirv.mlir.addressof @kernel_arg_0 : !spirv.ptr<!spirv.struct<(!spirv.array<12 x f32, stride=4> [0])>, StorageBuffer>
+      %4 = spirv.AccessChain %0[%2, %2] : !spirv.ptr<!spirv.struct<(!spirv.array<12 x f32, stride=4> [0])>, StorageBuffer>, i32, i32
+      %5 = spirv.Load "StorageBuffer" %4 : f32
+      spirv.Return
     }
-    spv.EntryPoint "GLCompute" @kernel
-    spv.ExecutionMode @kernel "LocalSize", 1, 1, 1
+    spirv.EntryPoint "GLCompute" @kernel
+    spirv.ExecutionMode @kernel "LocalSize", 1, 1, 1
   }
   gpu.module @kernels {
     gpu.func @kernel(%arg0: memref<12xf32>) kernel {

diff  --git a/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir b/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir
index 6aeb60c8cc3e..17e8f454c184 100644
--- a/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir
+++ b/mlir/test/Conversion/LinalgToSPIRV/linalg-to-spirv.mlir
@@ -13,48 +13,48 @@
 }
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>, #spirv.resource_limits<>>
 } {
 
-// CHECK:      spv.GlobalVariable
+// CHECK:      spirv.GlobalVariable
 // CHECK-SAME: built_in("LocalInvocationId")
 
 // CHECK:      @single_workgroup_reduction
-// CHECK-SAME: (%[[INPUT:.+]]: !spv.ptr{{.+}}, %[[OUTPUT:.+]]: !spv.ptr{{.+}})
+// CHECK-SAME: (%[[INPUT:.+]]: !spirv.ptr{{.+}}, %[[OUTPUT:.+]]: !spirv.ptr{{.+}})
 
-// CHECK:        %[[ZERO:.+]] = spv.Constant 0 : i32
-// CHECK:        %[[ID:.+]] = spv.Load "Input" %{{.+}} : vector<3xi32>
-// CHECK:        %[[X:.+]] = spv.CompositeExtract %[[ID]][0 : i32]
+// CHECK:        %[[ZERO:.+]] = spirv.Constant 0 : i32
+// CHECK:        %[[ID:.+]] = spirv.Load "Input" %{{.+}} : vector<3xi32>
+// CHECK:        %[[X:.+]] = spirv.CompositeExtract %[[ID]][0 : i32]
 
-// CHECK:        %[[INPTR:.+]] = spv.AccessChain %[[INPUT]][%[[ZERO]], %[[X]]]
-// CHECK:        %[[VAL:.+]] = spv.Load "StorageBuffer" %[[INPTR]] : i32
-// CHECK:        %[[ADD:.+]] = spv.GroupNonUniformIAdd "Subgroup" "Reduce" %[[VAL]] : i32
+// CHECK:        %[[INPTR:.+]] = spirv.AccessChain %[[INPUT]][%[[ZERO]], %[[X]]]
+// CHECK:        %[[VAL:.+]] = spirv.Load "StorageBuffer" %[[INPTR]] : i32
+// CHECK:        %[[ADD:.+]] = spirv.GroupNonUniformIAdd "Subgroup" "Reduce" %[[VAL]] : i32
 
-// CHECK:        %[[OUTPTR:.+]] = spv.AccessChain %[[OUTPUT]][%[[ZERO]], %[[ZERO]]]
-// CHECK:        %[[ELECT:.+]] = spv.GroupNonUniformElect <Subgroup> : i1
+// CHECK:        %[[OUTPTR:.+]] = spirv.AccessChain %[[OUTPUT]][%[[ZERO]], %[[ZERO]]]
+// CHECK:        %[[ELECT:.+]] = spirv.GroupNonUniformElect <Subgroup> : i1
 
-// CHECK:        spv.mlir.selection {
-// CHECK:          spv.BranchConditional %[[ELECT]], ^bb1, ^bb2
+// CHECK:        spirv.mlir.selection {
+// CHECK:          spirv.BranchConditional %[[ELECT]], ^bb1, ^bb2
 // CHECK:        ^bb1:
-// CHECK:          spv.AtomicIAdd "Device" "AcquireRelease" %[[OUTPTR]], %[[ADD]]
-// CHECK:          spv.Branch ^bb2
+// CHECK:          spirv.AtomicIAdd "Device" "AcquireRelease" %[[OUTPTR]], %[[ADD]]
+// CHECK:          spirv.Branch ^bb2
 // CHECK:        ^bb2:
-// CHECK:          spv.mlir.merge
+// CHECK:          spirv.mlir.merge
 // CHECK:        }
-// CHECK:        spv.Return
+// CHECK:        spirv.Return
 
-func.func @single_workgroup_reduction(%input: memref<16xi32, #spv.storage_class<StorageBuffer>>, %output: memref<1xi32, #spv.storage_class<StorageBuffer>>) attributes {
-  spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>
+func.func @single_workgroup_reduction(%input: memref<16xi32, #spirv.storage_class<StorageBuffer>>, %output: memref<1xi32, #spirv.storage_class<StorageBuffer>>) attributes {
+  spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[16, 1, 1]>: vector<3xi32>>
 } {
   linalg.generic #single_workgroup_reduction_trait
-      ins(%input : memref<16xi32, #spv.storage_class<StorageBuffer>>)
-     outs(%output : memref<1xi32, #spv.storage_class<StorageBuffer>>) {
+      ins(%input : memref<16xi32, #spirv.storage_class<StorageBuffer>>)
+     outs(%output : memref<1xi32, #spirv.storage_class<StorageBuffer>>) {
     ^bb(%in: i32, %out: i32):
       %sum = arith.addi %in, %out : i32
       linalg.yield %sum : i32
   }
-  spv.Return
+  spirv.Return
 }
 }
 
@@ -71,14 +71,14 @@ func.func @single_workgroup_reduction(%input: memref<16xi32, #spv.storage_class<
 }
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>, #spirv.resource_limits<>>
 } {
-func.func @single_workgroup_reduction(%input: memref<16xi32, #spv.storage_class<StorageBuffer>>, %output: memref<1xi32, #spv.storage_class<StorageBuffer>>) {
+func.func @single_workgroup_reduction(%input: memref<16xi32, #spirv.storage_class<StorageBuffer>>, %output: memref<1xi32, #spirv.storage_class<StorageBuffer>>) {
   // expected-error @+1 {{failed to legalize operation 'linalg.generic'}}
   linalg.generic #single_workgroup_reduction_trait
-      ins(%input : memref<16xi32, #spv.storage_class<StorageBuffer>>)
-     outs(%output : memref<1xi32, #spv.storage_class<StorageBuffer>>) {
+      ins(%input : memref<16xi32, #spirv.storage_class<StorageBuffer>>)
+     outs(%output : memref<1xi32, #spirv.storage_class<StorageBuffer>>) {
     ^bb(%in: i32, %out: i32):
       %sum = arith.addi %in, %out : i32
       linalg.yield %sum : i32
@@ -100,21 +100,21 @@ func.func @single_workgroup_reduction(%input: memref<16xi32, #spv.storage_class<
 }
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>, #spirv.resource_limits<>>
 } {
-func.func @single_workgroup_reduction(%input: memref<16xi32, #spv.storage_class<StorageBuffer>>, %output: memref<1xi32, #spv.storage_class<StorageBuffer>>) attributes {
-  spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[32, 1, 1]>: vector<3xi32>>
+func.func @single_workgroup_reduction(%input: memref<16xi32, #spirv.storage_class<StorageBuffer>>, %output: memref<1xi32, #spirv.storage_class<StorageBuffer>>) attributes {
+  spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[32, 1, 1]>: vector<3xi32>>
 } {
   // expected-error @+1 {{failed to legalize operation 'linalg.generic'}}
   linalg.generic #single_workgroup_reduction_trait
-      ins(%input : memref<16xi32, #spv.storage_class<StorageBuffer>>)
-     outs(%output : memref<1xi32, #spv.storage_class<StorageBuffer>>) {
+      ins(%input : memref<16xi32, #spirv.storage_class<StorageBuffer>>)
+     outs(%output : memref<1xi32, #spirv.storage_class<StorageBuffer>>) {
     ^bb(%in: i32, %out: i32):
       %sum = arith.addi %in, %out : i32
       linalg.yield %sum : i32
   }
-  spv.Return
+  spirv.Return
 }
 }
 
@@ -131,20 +131,20 @@ func.func @single_workgroup_reduction(%input: memref<16xi32, #spv.storage_class<
 }
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.3, [Shader, GroupNonUniformArithmetic], []>, #spirv.resource_limits<>>
 } {
-func.func @single_workgroup_reduction(%input: memref<16x8xi32, #spv.storage_class<StorageBuffer>>, %output: memref<16xi32, #spv.storage_class<StorageBuffer>>) attributes {
-  spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<[16, 8, 1]>: vector<3xi32>>
+func.func @single_workgroup_reduction(%input: memref<16x8xi32, #spirv.storage_class<StorageBuffer>>, %output: memref<16xi32, #spirv.storage_class<StorageBuffer>>) attributes {
+  spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<[16, 8, 1]>: vector<3xi32>>
 } {
   // expected-error @+1 {{failed to legalize operation 'linalg.generic'}}
   linalg.generic #single_workgroup_reduction_trait
-      ins(%input : memref<16x8xi32, #spv.storage_class<StorageBuffer>>)
-     outs(%output : memref<16xi32, #spv.storage_class<StorageBuffer>>) {
+      ins(%input : memref<16x8xi32, #spirv.storage_class<StorageBuffer>>)
+     outs(%output : memref<16xi32, #spirv.storage_class<StorageBuffer>>) {
     ^bb(%in: i32, %out: i32):
       %sum = arith.addi %in, %out : i32
       linalg.yield %sum : i32
   }
-  spv.Return
+  spirv.Return
 }
 }

diff  --git a/mlir/test/Conversion/MathToSPIRV/math-to-core-spirv.mlir b/mlir/test/Conversion/MathToSPIRV/math-to-core-spirv.mlir
index 67a3b8ca8eb6..a9ea026d86b8 100644
--- a/mlir/test/Conversion/MathToSPIRV/math-to-core-spirv.mlir
+++ b/mlir/test/Conversion/MathToSPIRV/math-to-core-spirv.mlir
@@ -7,19 +7,19 @@ func.func @copy_sign_scalar(%value: f32, %sign: f32) -> f32 {
 
 // CHECK-LABEL: func @copy_sign_scalar
 //  CHECK-SAME: (%[[VALUE:.+]]: f32, %[[SIGN:.+]]: f32)
-//       CHECK:   %[[SMASK:.+]] = spv.Constant -2147483648 : i32
-//       CHECK:   %[[VMASK:.+]] = spv.Constant 2147483647 : i32
-//       CHECK:   %[[VCAST:.+]] = spv.Bitcast %[[VALUE]] : f32 to i32
-//       CHECK:   %[[SCAST:.+]] = spv.Bitcast %[[SIGN]] : f32 to i32
-//       CHECK:   %[[VAND:.+]] = spv.BitwiseAnd %[[VCAST]], %[[VMASK]] : i32
-//       CHECK:   %[[SAND:.+]] = spv.BitwiseAnd %[[SCAST]], %[[SMASK]] : i32
-//       CHECK:   %[[OR:.+]] = spv.BitwiseOr %[[VAND]], %[[SAND]] : i32
-//       CHECK:   %[[RESULT:.+]] = spv.Bitcast %[[OR]] : i32 to f32
+//       CHECK:   %[[SMASK:.+]] = spirv.Constant -2147483648 : i32
+//       CHECK:   %[[VMASK:.+]] = spirv.Constant 2147483647 : i32
+//       CHECK:   %[[VCAST:.+]] = spirv.Bitcast %[[VALUE]] : f32 to i32
+//       CHECK:   %[[SCAST:.+]] = spirv.Bitcast %[[SIGN]] : f32 to i32
+//       CHECK:   %[[VAND:.+]] = spirv.BitwiseAnd %[[VCAST]], %[[VMASK]] : i32
+//       CHECK:   %[[SAND:.+]] = spirv.BitwiseAnd %[[SCAST]], %[[SMASK]] : i32
+//       CHECK:   %[[OR:.+]] = spirv.BitwiseOr %[[VAND]], %[[SAND]] : i32
+//       CHECK:   %[[RESULT:.+]] = spirv.Bitcast %[[OR]] : i32 to f32
 //       CHECK:   return %[[RESULT]]
 
 // -----
 
-module attributes { spv.target_env = #spv.target_env<#spv.vce<v1.0, [Float16, Int16], []>, #spv.resource_limits<>> } {
+module attributes { spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Float16, Int16], []>, #spirv.resource_limits<>> } {
 
 func.func @copy_sign_vector(%value: vector<3xf16>, %sign: vector<3xf16>) -> vector<3xf16> {
   %0 = math.copysign %value, %sign : vector<3xf16>
@@ -30,14 +30,14 @@ func.func @copy_sign_vector(%value: vector<3xf16>, %sign: vector<3xf16>) -> vect
 
 // CHECK-LABEL: func @copy_sign_vector
 //  CHECK-SAME: (%[[VALUE:.+]]: vector<3xf16>, %[[SIGN:.+]]: vector<3xf16>)
-//       CHECK:   %[[SMASK:.+]] = spv.Constant -32768 : i16
-//       CHECK:   %[[VMASK:.+]] = spv.Constant 32767 : i16
-//       CHECK:   %[[SVMASK:.+]] = spv.CompositeConstruct %[[SMASK]], %[[SMASK]], %[[SMASK]]
-//       CHECK:   %[[VVMASK:.+]] = spv.CompositeConstruct %[[VMASK]], %[[VMASK]], %[[VMASK]]
-//       CHECK:   %[[VCAST:.+]] = spv.Bitcast %[[VALUE]] : vector<3xf16> to vector<3xi16>
-//       CHECK:   %[[SCAST:.+]] = spv.Bitcast %[[SIGN]] : vector<3xf16> to vector<3xi16>
-//       CHECK:   %[[VAND:.+]] = spv.BitwiseAnd %[[VCAST]], %[[VVMASK]] : vector<3xi16>
-//       CHECK:   %[[SAND:.+]] = spv.BitwiseAnd %[[SCAST]], %[[SVMASK]] : vector<3xi16>
-//       CHECK:   %[[OR:.+]] = spv.BitwiseOr %[[VAND]], %[[SAND]] : vector<3xi16>
-//       CHECK:   %[[RESULT:.+]] = spv.Bitcast %[[OR]] : vector<3xi16> to vector<3xf16>
+//       CHECK:   %[[SMASK:.+]] = spirv.Constant -32768 : i16
+//       CHECK:   %[[VMASK:.+]] = spirv.Constant 32767 : i16
+//       CHECK:   %[[SVMASK:.+]] = spirv.CompositeConstruct %[[SMASK]], %[[SMASK]], %[[SMASK]]
+//       CHECK:   %[[VVMASK:.+]] = spirv.CompositeConstruct %[[VMASK]], %[[VMASK]], %[[VMASK]]
+//       CHECK:   %[[VCAST:.+]] = spirv.Bitcast %[[VALUE]] : vector<3xf16> to vector<3xi16>
+//       CHECK:   %[[SCAST:.+]] = spirv.Bitcast %[[SIGN]] : vector<3xf16> to vector<3xi16>
+//       CHECK:   %[[VAND:.+]] = spirv.BitwiseAnd %[[VCAST]], %[[VVMASK]] : vector<3xi16>
+//       CHECK:   %[[SAND:.+]] = spirv.BitwiseAnd %[[SCAST]], %[[SVMASK]] : vector<3xi16>
+//       CHECK:   %[[OR:.+]] = spirv.BitwiseOr %[[VAND]], %[[SAND]] : vector<3xi16>
+//       CHECK:   %[[RESULT:.+]] = spirv.Bitcast %[[OR]] : vector<3xi16> to vector<3xf16>
 //       CHECK:   return %[[RESULT]]

diff  --git a/mlir/test/Conversion/MathToSPIRV/math-to-gl-spirv.mlir b/mlir/test/Conversion/MathToSPIRV/math-to-gl-spirv.mlir
index 662478aa30db..5241c8a85710 100644
--- a/mlir/test/Conversion/MathToSPIRV/math-to-gl-spirv.mlir
+++ b/mlir/test/Conversion/MathToSPIRV/math-to-gl-spirv.mlir
@@ -1,72 +1,72 @@
 // RUN: mlir-opt -split-input-file -convert-math-to-spirv -verify-diagnostics %s -o - | FileCheck %s
 
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [Shader], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Shader], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @float32_unary_scalar
 func.func @float32_unary_scalar(%arg0: f32) {
-  // CHECK: spv.GL.Cos %{{.*}}: f32
+  // CHECK: spirv.GL.Cos %{{.*}}: f32
   %0 = math.cos %arg0 : f32
-  // CHECK: spv.GL.Exp %{{.*}}: f32
+  // CHECK: spirv.GL.Exp %{{.*}}: f32
   %1 = math.exp %arg0 : f32
-  // CHECK: %[[EXP:.+]] = spv.GL.Exp %arg0
-  // CHECK: %[[ONE:.+]] = spv.Constant 1.000000e+00 : f32
-  // CHECK: spv.FSub %[[EXP]], %[[ONE]]
+  // CHECK: %[[EXP:.+]] = spirv.GL.Exp %arg0
+  // CHECK: %[[ONE:.+]] = spirv.Constant 1.000000e+00 : f32
+  // CHECK: spirv.FSub %[[EXP]], %[[ONE]]
   %2 = math.expm1 %arg0 : f32
-  // CHECK: spv.GL.Log %{{.*}}: f32
+  // CHECK: spirv.GL.Log %{{.*}}: f32
   %3 = math.log %arg0 : f32
-  // CHECK: %[[ONE:.+]] = spv.Constant 1.000000e+00 : f32
-  // CHECK: %[[ADDONE:.+]] = spv.FAdd %[[ONE]], %{{.+}}
-  // CHECK: spv.GL.Log %[[ADDONE]]
+  // CHECK: %[[ONE:.+]] = spirv.Constant 1.000000e+00 : f32
+  // CHECK: %[[ADDONE:.+]] = spirv.FAdd %[[ONE]], %{{.+}}
+  // CHECK: spirv.GL.Log %[[ADDONE]]
   %4 = math.log1p %arg0 : f32
-  // CHECK: spv.GL.InverseSqrt %{{.*}}: f32
+  // CHECK: spirv.GL.InverseSqrt %{{.*}}: f32
   %5 = math.rsqrt %arg0 : f32
-  // CHECK: spv.GL.Sqrt %{{.*}}: f32
+  // CHECK: spirv.GL.Sqrt %{{.*}}: f32
   %6 = math.sqrt %arg0 : f32
-  // CHECK: spv.GL.Tanh %{{.*}}: f32
+  // CHECK: spirv.GL.Tanh %{{.*}}: f32
   %7 = math.tanh %arg0 : f32
-  // CHECK: spv.GL.Sin %{{.*}}: f32
+  // CHECK: spirv.GL.Sin %{{.*}}: f32
   %8 = math.sin %arg0 : f32
-  // CHECK: spv.GL.FAbs %{{.*}}: f32
+  // CHECK: spirv.GL.FAbs %{{.*}}: f32
   %9 = math.absf %arg0 : f32
-  // CHECK: spv.GL.Ceil %{{.*}}: f32
+  // CHECK: spirv.GL.Ceil %{{.*}}: f32
   %10 = math.ceil %arg0 : f32
-  // CHECK: spv.GL.Floor %{{.*}}: f32
+  // CHECK: spirv.GL.Floor %{{.*}}: f32
   %11 = math.floor %arg0 : f32
   return
 }
 
 // CHECK-LABEL: @float32_unary_vector
 func.func @float32_unary_vector(%arg0: vector<3xf32>) {
-  // CHECK: spv.GL.Cos %{{.*}}: vector<3xf32>
+  // CHECK: spirv.GL.Cos %{{.*}}: vector<3xf32>
   %0 = math.cos %arg0 : vector<3xf32>
-  // CHECK: spv.GL.Exp %{{.*}}: vector<3xf32>
+  // CHECK: spirv.GL.Exp %{{.*}}: vector<3xf32>
   %1 = math.exp %arg0 : vector<3xf32>
-  // CHECK: %[[EXP:.+]] = spv.GL.Exp %arg0
-  // CHECK: %[[ONE:.+]] = spv.Constant dense<1.000000e+00> : vector<3xf32>
-  // CHECK: spv.FSub %[[EXP]], %[[ONE]]
+  // CHECK: %[[EXP:.+]] = spirv.GL.Exp %arg0
+  // CHECK: %[[ONE:.+]] = spirv.Constant dense<1.000000e+00> : vector<3xf32>
+  // CHECK: spirv.FSub %[[EXP]], %[[ONE]]
   %2 = math.expm1 %arg0 : vector<3xf32>
-  // CHECK: spv.GL.Log %{{.*}}: vector<3xf32>
+  // CHECK: spirv.GL.Log %{{.*}}: vector<3xf32>
   %3 = math.log %arg0 : vector<3xf32>
-  // CHECK: %[[ONE:.+]] = spv.Constant dense<1.000000e+00> : vector<3xf32>
-  // CHECK: %[[ADDONE:.+]] = spv.FAdd %[[ONE]], %{{.+}}
-  // CHECK: spv.GL.Log %[[ADDONE]]
+  // CHECK: %[[ONE:.+]] = spirv.Constant dense<1.000000e+00> : vector<3xf32>
+  // CHECK: %[[ADDONE:.+]] = spirv.FAdd %[[ONE]], %{{.+}}
+  // CHECK: spirv.GL.Log %[[ADDONE]]
   %4 = math.log1p %arg0 : vector<3xf32>
-  // CHECK: spv.GL.InverseSqrt %{{.*}}: vector<3xf32>
+  // CHECK: spirv.GL.InverseSqrt %{{.*}}: vector<3xf32>
   %5 = math.rsqrt %arg0 : vector<3xf32>
-  // CHECK: spv.GL.Sqrt %{{.*}}: vector<3xf32>
+  // CHECK: spirv.GL.Sqrt %{{.*}}: vector<3xf32>
   %6 = math.sqrt %arg0 : vector<3xf32>
-  // CHECK: spv.GL.Tanh %{{.*}}: vector<3xf32>
+  // CHECK: spirv.GL.Tanh %{{.*}}: vector<3xf32>
   %7 = math.tanh %arg0 : vector<3xf32>
-  // CHECK: spv.GL.Sin %{{.*}}: vector<3xf32>
+  // CHECK: spirv.GL.Sin %{{.*}}: vector<3xf32>
   %8 = math.sin %arg0 : vector<3xf32>
   return
 }
 
 // CHECK-LABEL: @float32_ternary_scalar
 func.func @float32_ternary_scalar(%a: f32, %b: f32, %c: f32) {
-  // CHECK: spv.GL.Fma %{{.*}}: f32
+  // CHECK: spirv.GL.Fma %{{.*}}: f32
   %0 = math.fma %a, %b, %c : f32
   return
 }
@@ -74,14 +74,14 @@ func.func @float32_ternary_scalar(%a: f32, %b: f32, %c: f32) {
 // CHECK-LABEL: @float32_ternary_vector
 func.func @float32_ternary_vector(%a: vector<4xf32>, %b: vector<4xf32>,
                             %c: vector<4xf32>) {
-  // CHECK: spv.GL.Fma %{{.*}}: vector<4xf32>
+  // CHECK: spirv.GL.Fma %{{.*}}: vector<4xf32>
   %0 = math.fma %a, %b, %c : vector<4xf32>
   return
 }
 
 // CHECK-LABEL: @int_unary
 func.func @int_unary(%arg0: i32) {
-  // CHECK: spv.GL.SAbs %{{.*}}
+  // CHECK: spirv.GL.SAbs %{{.*}}
   %0 = math.absi %arg0 : i32
   return
 }
@@ -89,14 +89,14 @@ func.func @int_unary(%arg0: i32) {
 // CHECK-LABEL: @ctlz_scalar
 //  CHECK-SAME: (%[[VAL:.+]]: i32)
 func.func @ctlz_scalar(%val: i32) -> i32 {
-  // CHECK-DAG: %[[V1:.+]] = spv.Constant 1 : i32
-  // CHECK-DAG: %[[V31:.+]] = spv.Constant 31 : i32
-  // CHECK-DAG: %[[V32:.+]] = spv.Constant 32 : i32
-  // CHECK: %[[MSB:.+]] = spv.GL.FindUMsb %[[VAL]] : i32
-  // CHECK: %[[SUB1:.+]] = spv.ISub %[[V31]], %[[MSB]] : i32
-  // CHECK: %[[SUB2:.+]] = spv.ISub %[[V32]], %[[VAL]] : i32
-  // CHECK: %[[CMP:.+]] = spv.ULessThanEqual %[[VAL]], %[[V1]] : i32
-  // CHECK: %[[R:.+]] = spv.Select %[[CMP]], %[[SUB2]], %[[SUB1]] : i1, i32
+  // CHECK-DAG: %[[V1:.+]] = spirv.Constant 1 : i32
+  // CHECK-DAG: %[[V31:.+]] = spirv.Constant 31 : i32
+  // CHECK-DAG: %[[V32:.+]] = spirv.Constant 32 : i32
+  // CHECK: %[[MSB:.+]] = spirv.GL.FindUMsb %[[VAL]] : i32
+  // CHECK: %[[SUB1:.+]] = spirv.ISub %[[V31]], %[[MSB]] : i32
+  // CHECK: %[[SUB2:.+]] = spirv.ISub %[[V32]], %[[VAL]] : i32
+  // CHECK: %[[CMP:.+]] = spirv.ULessThanEqual %[[VAL]], %[[V1]] : i32
+  // CHECK: %[[R:.+]] = spirv.Select %[[CMP]], %[[SUB2]], %[[SUB1]] : i1, i32
   // CHECK: return %[[R]]
   %0 = math.ctlz %val : i32
   return %0 : i32
@@ -104,10 +104,10 @@ func.func @ctlz_scalar(%val: i32) -> i32 {
 
 // CHECK-LABEL: @ctlz_vector1
 func.func @ctlz_vector1(%val: vector<1xi32>) -> vector<1xi32> {
-  // CHECK: spv.GL.FindUMsb
-  // CHECK: spv.ISub
-  // CHECK: spv.ULessThanEqual
-  // CHECK: spv.Select
+  // CHECK: spirv.GL.FindUMsb
+  // CHECK: spirv.ISub
+  // CHECK: spirv.ULessThanEqual
+  // CHECK: spirv.Select
   %0 = math.ctlz %val : vector<1xi32>
   return %0 : vector<1xi32>
 }
@@ -115,14 +115,14 @@ func.func @ctlz_vector1(%val: vector<1xi32>) -> vector<1xi32> {
 // CHECK-LABEL: @ctlz_vector2
 //  CHECK-SAME: (%[[VAL:.+]]: vector<2xi32>)
 func.func @ctlz_vector2(%val: vector<2xi32>) -> vector<2xi32> {
-  // CHECK-DAG: %[[V1:.+]] = spv.Constant dense<1> : vector<2xi32>
-  // CHECK-DAG: %[[V31:.+]] = spv.Constant dense<31> : vector<2xi32>
-  // CHECK-DAG: %[[V32:.+]] = spv.Constant dense<32> : vector<2xi32>
-  // CHECK: %[[MSB:.+]] = spv.GL.FindUMsb %[[VAL]] : vector<2xi32>
-  // CHECK: %[[SUB1:.+]] = spv.ISub %[[V31]], %[[MSB]] : vector<2xi32>
-  // CHECK: %[[SUB2:.+]] = spv.ISub %[[V32]], %[[VAL]] : vector<2xi32>
-  // CHECK: %[[CMP:.+]] = spv.ULessThanEqual %[[VAL]], %[[V1]] : vector<2xi32>
-  // CHECK: %[[R:.+]] = spv.Select %[[CMP]], %[[SUB2]], %[[SUB1]] : vector<2xi1>, vector<2xi32>
+  // CHECK-DAG: %[[V1:.+]] = spirv.Constant dense<1> : vector<2xi32>
+  // CHECK-DAG: %[[V31:.+]] = spirv.Constant dense<31> : vector<2xi32>
+  // CHECK-DAG: %[[V32:.+]] = spirv.Constant dense<32> : vector<2xi32>
+  // CHECK: %[[MSB:.+]] = spirv.GL.FindUMsb %[[VAL]] : vector<2xi32>
+  // CHECK: %[[SUB1:.+]] = spirv.ISub %[[V31]], %[[MSB]] : vector<2xi32>
+  // CHECK: %[[SUB2:.+]] = spirv.ISub %[[V32]], %[[VAL]] : vector<2xi32>
+  // CHECK: %[[CMP:.+]] = spirv.ULessThanEqual %[[VAL]], %[[V1]] : vector<2xi32>
+  // CHECK: %[[R:.+]] = spirv.Select %[[CMP]], %[[SUB2]], %[[SUB1]] : vector<2xi1>, vector<2xi32>
   %0 = math.ctlz %val : vector<2xi32>
   return %0 : vector<2xi32>
 }
@@ -130,12 +130,12 @@ func.func @ctlz_vector2(%val: vector<2xi32>) -> vector<2xi32> {
 // CHECK-LABEL: @powf_scalar
 //  CHECK-SAME: (%[[LHS:.+]]: f32, %[[RHS:.+]]: f32)
 func.func @powf_scalar(%lhs: f32, %rhs: f32) -> f32 {
-  // CHECK: %[[F0:.+]] = spv.Constant 0.000000e+00 : f32
-  // CHECK: %[[LT:.+]] = spv.FOrdLessThan %[[LHS]], %[[F0]] : f32
-  // CHECK: %[[ABS:.+]] = spv.GL.FAbs %[[LHS]] : f32
-  // CHECK: %[[POW:.+]] = spv.GL.Pow %[[ABS]], %[[RHS]] : f32
-  // CHECK: %[[NEG:.+]] = spv.FNegate %[[POW]] : f32
-  // CHECK: %[[SEL:.+]] = spv.Select %[[LT]], %[[NEG]], %[[POW]] : i1, f32
+  // CHECK: %[[F0:.+]] = spirv.Constant 0.000000e+00 : f32
+  // CHECK: %[[LT:.+]] = spirv.FOrdLessThan %[[LHS]], %[[F0]] : f32
+  // CHECK: %[[ABS:.+]] = spirv.GL.FAbs %[[LHS]] : f32
+  // CHECK: %[[POW:.+]] = spirv.GL.Pow %[[ABS]], %[[RHS]] : f32
+  // CHECK: %[[NEG:.+]] = spirv.FNegate %[[POW]] : f32
+  // CHECK: %[[SEL:.+]] = spirv.Select %[[LT]], %[[NEG]], %[[POW]] : i1, f32
   %0 = math.powf %lhs, %rhs : f32
   // CHECK: return %[[SEL]]
   return %0: f32
@@ -143,43 +143,43 @@ func.func @powf_scalar(%lhs: f32, %rhs: f32) -> f32 {
 
 // CHECK-LABEL: @powf_vector
 func.func @powf_vector(%lhs: vector<4xf32>, %rhs: vector<4xf32>) -> vector<4xf32> {
-  // CHECK: spv.FOrdLessThan
-  // CHEKC: spv.GL.FAbs
-  // CHECK: spv.GL.Pow %{{.*}}: vector<4xf32>
-  // CHECK: spv.FNegate
-  // CHECK: spv.Select
+  // CHECK: spirv.FOrdLessThan
+  // CHEKC: spirv.GL.FAbs
+  // CHECK: spirv.GL.Pow %{{.*}}: vector<4xf32>
+  // CHECK: spirv.FNegate
+  // CHECK: spirv.Select
   %0 = math.powf %lhs, %rhs : vector<4xf32>
   return %0: vector<4xf32>
 }
 
 // CHECK-LABEL: @round_scalar
 func.func @round_scalar(%x: f32) -> f32 {
-  // CHECK: %[[ZERO:.+]] = spv.Constant 0.000000e+00
-  // CHECK: %[[ONE:.+]] = spv.Constant 1.000000e+00
-  // CHECK: %[[HALF:.+]] = spv.Constant 5.000000e-01
-  // CHECK: %[[ABS:.+]] = spv.GL.FAbs %arg0
-  // CHECK: %[[FLOOR:.+]] = spv.GL.Floor %[[ABS]]
-  // CHECK: %[[SUB:.+]] = spv.FSub %[[ABS]], %[[FLOOR]]
-  // CHECK: %[[GE:.+]] = spv.FOrdGreaterThanEqual %[[SUB]], %[[HALF]]
-  // CHECK: %[[SEL:.+]] = spv.Select %[[GE]], %[[ONE]], %[[ZERO]]
-  // CHECK: %[[ADD:.+]] = spv.FAdd %[[FLOOR]], %[[SEL]]
-  // CHECK: %[[BITCAST:.+]] = spv.Bitcast %[[ADD]]
+  // CHECK: %[[ZERO:.+]] = spirv.Constant 0.000000e+00
+  // CHECK: %[[ONE:.+]] = spirv.Constant 1.000000e+00
+  // CHECK: %[[HALF:.+]] = spirv.Constant 5.000000e-01
+  // CHECK: %[[ABS:.+]] = spirv.GL.FAbs %arg0
+  // CHECK: %[[FLOOR:.+]] = spirv.GL.Floor %[[ABS]]
+  // CHECK: %[[SUB:.+]] = spirv.FSub %[[ABS]], %[[FLOOR]]
+  // CHECK: %[[GE:.+]] = spirv.FOrdGreaterThanEqual %[[SUB]], %[[HALF]]
+  // CHECK: %[[SEL:.+]] = spirv.Select %[[GE]], %[[ONE]], %[[ZERO]]
+  // CHECK: %[[ADD:.+]] = spirv.FAdd %[[FLOOR]], %[[SEL]]
+  // CHECK: %[[BITCAST:.+]] = spirv.Bitcast %[[ADD]]
   %0 = math.round %x : f32
   return %0: f32
 }
 
 // CHECK-LABEL: @round_vector
 func.func @round_vector(%x: vector<4xf32>) -> vector<4xf32> {
-  // CHECK: %[[ZERO:.+]] = spv.Constant dense<0.000000e+00>
-  // CHECK: %[[ONE:.+]] = spv.Constant dense<1.000000e+00>
-  // CHECK: %[[HALF:.+]] = spv.Constant dense<5.000000e-01>
-  // CHECK: %[[ABS:.+]] = spv.GL.FAbs %arg0
-  // CHECK: %[[FLOOR:.+]] = spv.GL.Floor %[[ABS]]
-  // CHECK: %[[SUB:.+]] = spv.FSub %[[ABS]], %[[FLOOR]]
-  // CHECK: %[[GE:.+]] = spv.FOrdGreaterThanEqual %[[SUB]], %[[HALF]]
-  // CHECK: %[[SEL:.+]] = spv.Select %[[GE]], %[[ONE]], %[[ZERO]]
-  // CHECK: %[[ADD:.+]] = spv.FAdd %[[FLOOR]], %[[SEL]]
-  // CHECK: %[[BITCAST:.+]] = spv.Bitcast %[[ADD]]
+  // CHECK: %[[ZERO:.+]] = spirv.Constant dense<0.000000e+00>
+  // CHECK: %[[ONE:.+]] = spirv.Constant dense<1.000000e+00>
+  // CHECK: %[[HALF:.+]] = spirv.Constant dense<5.000000e-01>
+  // CHECK: %[[ABS:.+]] = spirv.GL.FAbs %arg0
+  // CHECK: %[[FLOOR:.+]] = spirv.GL.Floor %[[ABS]]
+  // CHECK: %[[SUB:.+]] = spirv.FSub %[[ABS]], %[[FLOOR]]
+  // CHECK: %[[GE:.+]] = spirv.FOrdGreaterThanEqual %[[SUB]], %[[HALF]]
+  // CHECK: %[[SEL:.+]] = spirv.Select %[[GE]], %[[ONE]], %[[ZERO]]
+  // CHECK: %[[ADD:.+]] = spirv.FAdd %[[FLOOR]], %[[SEL]]
+  // CHECK: %[[BITCAST:.+]] = spirv.Bitcast %[[ADD]]
   %0 = math.round %x : vector<4xf32>
   return %0: vector<4xf32>
 }
@@ -189,7 +189,7 @@ func.func @round_vector(%x: vector<4xf32>) -> vector<4xf32> {
 // -----
 
 module attributes {
-  spv.target_env = #spv.target_env<#spv.vce<v1.0, [Shader, Int64, Int16], []>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Shader, Int64, Int16], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @ctlz_scalar

diff  --git a/mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir b/mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir
index f336e7333f95..a85c7e6e06c9 100644
--- a/mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir
+++ b/mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir
@@ -1,88 +1,88 @@
 // RUN: mlir-opt -split-input-file -convert-math-to-spirv -verify-diagnostics %s -o - | FileCheck %s
 
-module attributes { spv.target_env = #spv.target_env<#spv.vce<v1.0, [Kernel], []>, #spv.resource_limits<>> } {
+module attributes { spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Kernel], []>, #spirv.resource_limits<>> } {
 
 // CHECK-LABEL: @float32_unary_scalar
 func.func @float32_unary_scalar(%arg0: f32) {
-  // CHECK: spv.CL.cos %{{.*}}: f32
+  // CHECK: spirv.CL.cos %{{.*}}: f32
   %0 = math.cos %arg0 : f32
-  // CHECK: spv.CL.exp %{{.*}}: f32
+  // CHECK: spirv.CL.exp %{{.*}}: f32
   %1 = math.exp %arg0 : f32
-  // CHECK: %[[EXP:.+]] = spv.CL.exp %arg0
-  // CHECK: %[[ONE:.+]] = spv.Constant 1.000000e+00 : f32
-  // CHECK: spv.FSub %[[EXP]], %[[ONE]]
+  // CHECK: %[[EXP:.+]] = spirv.CL.exp %arg0
+  // CHECK: %[[ONE:.+]] = spirv.Constant 1.000000e+00 : f32
+  // CHECK: spirv.FSub %[[EXP]], %[[ONE]]
   %2 = math.expm1 %arg0 : f32
-  // CHECK: spv.CL.log %{{.*}}: f32
+  // CHECK: spirv.CL.log %{{.*}}: f32
   %3 = math.log %arg0 : f32
-  // CHECK: %[[ONE:.+]] = spv.Constant 1.000000e+00 : f32
-  // CHECK: %[[ADDONE:.+]] = spv.FAdd %[[ONE]], %{{.+}}
-  // CHECK: spv.CL.log %[[ADDONE]]
+  // CHECK: %[[ONE:.+]] = spirv.Constant 1.000000e+00 : f32
+  // CHECK: %[[ADDONE:.+]] = spirv.FAdd %[[ONE]], %{{.+}}
+  // CHECK: spirv.CL.log %[[ADDONE]]
   %4 = math.log1p %arg0 : f32
-  // CHECK: spv.CL.rsqrt %{{.*}}: f32
+  // CHECK: spirv.CL.rsqrt %{{.*}}: f32
   %5 = math.rsqrt %arg0 : f32
-  // CHECK: spv.CL.sqrt %{{.*}}: f32
+  // CHECK: spirv.CL.sqrt %{{.*}}: f32
   %6 = math.sqrt %arg0 : f32
-  // CHECK: spv.CL.tanh %{{.*}}: f32
+  // CHECK: spirv.CL.tanh %{{.*}}: f32
   %7 = math.tanh %arg0 : f32
-  // CHECK: spv.CL.sin %{{.*}}: f32
+  // CHECK: spirv.CL.sin %{{.*}}: f32
   %8 = math.sin %arg0 : f32
-  // CHECK: spv.CL.fabs %{{.*}}: f32
+  // CHECK: spirv.CL.fabs %{{.*}}: f32
   %9 = math.absf %arg0 : f32
-  // CHECK: spv.CL.ceil %{{.*}}: f32
+  // CHECK: spirv.CL.ceil %{{.*}}: f32
   %10 = math.ceil %arg0 : f32
-  // CHECK: spv.CL.floor %{{.*}}: f32
+  // CHECK: spirv.CL.floor %{{.*}}: f32
   %11 = math.floor %arg0 : f32
-  // CHECK: spv.CL.erf %{{.*}}: f32
+  // CHECK: spirv.CL.erf %{{.*}}: f32
   %12 = math.erf %arg0 : f32
-  // CHECK: spv.CL.round %{{.*}}: f32
+  // CHECK: spirv.CL.round %{{.*}}: f32
   %13 = math.round %arg0 : f32
   return
 }
 
 // CHECK-LABEL: @float32_unary_vector
 func.func @float32_unary_vector(%arg0: vector<3xf32>) {
-  // CHECK: spv.CL.cos %{{.*}}: vector<3xf32>
+  // CHECK: spirv.CL.cos %{{.*}}: vector<3xf32>
   %0 = math.cos %arg0 : vector<3xf32>
-  // CHECK: spv.CL.exp %{{.*}}: vector<3xf32>
+  // CHECK: spirv.CL.exp %{{.*}}: vector<3xf32>
   %1 = math.exp %arg0 : vector<3xf32>
-  // CHECK: %[[EXP:.+]] = spv.CL.exp %arg0
-  // CHECK: %[[ONE:.+]] = spv.Constant dense<1.000000e+00> : vector<3xf32>
-  // CHECK: spv.FSub %[[EXP]], %[[ONE]]
+  // CHECK: %[[EXP:.+]] = spirv.CL.exp %arg0
+  // CHECK: %[[ONE:.+]] = spirv.Constant dense<1.000000e+00> : vector<3xf32>
+  // CHECK: spirv.FSub %[[EXP]], %[[ONE]]
   %2 = math.expm1 %arg0 : vector<3xf32>
-  // CHECK: spv.CL.log %{{.*}}: vector<3xf32>
+  // CHECK: spirv.CL.log %{{.*}}: vector<3xf32>
   %3 = math.log %arg0 : vector<3xf32>
-  // CHECK: %[[ONE:.+]] = spv.Constant dense<1.000000e+00> : vector<3xf32>
-  // CHECK: %[[ADDONE:.+]] = spv.FAdd %[[ONE]], %{{.+}}
-  // CHECK: spv.CL.log %[[ADDONE]]
+  // CHECK: %[[ONE:.+]] = spirv.Constant dense<1.000000e+00> : vector<3xf32>
+  // CHECK: %[[ADDONE:.+]] = spirv.FAdd %[[ONE]], %{{.+}}
+  // CHECK: spirv.CL.log %[[ADDONE]]
   %4 = math.log1p %arg0 : vector<3xf32>
-  // CHECK: spv.CL.rsqrt %{{.*}}: vector<3xf32>
+  // CHECK: spirv.CL.rsqrt %{{.*}}: vector<3xf32>
   %5 = math.rsqrt %arg0 : vector<3xf32>
-  // CHECK: spv.CL.sqrt %{{.*}}: vector<3xf32>
+  // CHECK: spirv.CL.sqrt %{{.*}}: vector<3xf32>
   %6 = math.sqrt %arg0 : vector<3xf32>
-  // CHECK: spv.CL.tanh %{{.*}}: vector<3xf32>
+  // CHECK: spirv.CL.tanh %{{.*}}: vector<3xf32>
   %7 = math.tanh %arg0 : vector<3xf32>
-  // CHECK: spv.CL.sin %{{.*}}: vector<3xf32>
+  // CHECK: spirv.CL.sin %{{.*}}: vector<3xf32>
   %8 = math.sin %arg0 : vector<3xf32>
   return
 }
 
 // CHECK-LABEL: @float32_binary_scalar
 func.func @float32_binary_scalar(%lhs: f32, %rhs: f32) {
-  // CHECK: spv.CL.pow %{{.*}}: f32
+  // CHECK: spirv.CL.pow %{{.*}}: f32
   %0 = math.powf %lhs, %rhs : f32
   return
 }
 
 // CHECK-LABEL: @float32_binary_vector
 func.func @float32_binary_vector(%lhs: vector<4xf32>, %rhs: vector<4xf32>) {
-  // CHECK: spv.CL.pow %{{.*}}: vector<4xf32>
+  // CHECK: spirv.CL.pow %{{.*}}: vector<4xf32>
   %0 = math.powf %lhs, %rhs : vector<4xf32>
   return
 }
 
 // CHECK-LABEL: @float32_ternary_scalar
 func.func @float32_ternary_scalar(%a: f32, %b: f32, %c: f32) {
-  // CHECK: spv.CL.fma %{{.*}}: f32
+  // CHECK: spirv.CL.fma %{{.*}}: f32
   %0 = math.fma %a, %b, %c : f32
   return
 }
@@ -90,7 +90,7 @@ func.func @float32_ternary_scalar(%a: f32, %b: f32, %c: f32) {
 // CHECK-LABEL: @float32_ternary_vector
 func.func @float32_ternary_vector(%a: vector<4xf32>, %b: vector<4xf32>,
                             %c: vector<4xf32>) {
-  // CHECK: spv.CL.fma %{{.*}}: vector<4xf32>
+  // CHECK: spirv.CL.fma %{{.*}}: vector<4xf32>
   %0 = math.fma %a, %b, %c : vector<4xf32>
   return
 }

diff  --git a/mlir/test/Conversion/MemRefToSPIRV/alloc.mlir b/mlir/test/Conversion/MemRefToSPIRV/alloc.mlir
index 2edc37eb82e6..d1979e21a436 100644
--- a/mlir/test/Conversion/MemRefToSPIRV/alloc.mlir
+++ b/mlir/test/Conversion/MemRefToSPIRV/alloc.mlir
@@ -1,110 +1,110 @@
 // RUN: mlir-opt -split-input-file -convert-memref-to-spirv -canonicalize -verify-diagnostics %s -o - | FileCheck %s
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
   }
 {
   func.func @alloc_dealloc_workgroup_mem(%arg0 : index, %arg1 : index) {
-    %0 = memref.alloc() : memref<4x5xf32, #spv.storage_class<Workgroup>>
-    %1 = memref.load %0[%arg0, %arg1] : memref<4x5xf32, #spv.storage_class<Workgroup>>
-    memref.store %1, %0[%arg0, %arg1] : memref<4x5xf32, #spv.storage_class<Workgroup>>
-    memref.dealloc %0 : memref<4x5xf32, #spv.storage_class<Workgroup>>
+    %0 = memref.alloc() : memref<4x5xf32, #spirv.storage_class<Workgroup>>
+    %1 = memref.load %0[%arg0, %arg1] : memref<4x5xf32, #spirv.storage_class<Workgroup>>
+    memref.store %1, %0[%arg0, %arg1] : memref<4x5xf32, #spirv.storage_class<Workgroup>>
+    memref.dealloc %0 : memref<4x5xf32, #spirv.storage_class<Workgroup>>
     return
   }
 }
-//     CHECK: spv.GlobalVariable @[[VAR:.+]] : !spv.ptr<!spv.struct<(!spv.array<20 x f32>)>, Workgroup>
+//     CHECK: spirv.GlobalVariable @[[VAR:.+]] : !spirv.ptr<!spirv.struct<(!spirv.array<20 x f32>)>, Workgroup>
 //     CHECK: func @alloc_dealloc_workgroup_mem
 // CHECK-NOT:   memref.alloc
-//     CHECK:   %[[PTR:.+]] = spv.mlir.addressof @[[VAR]]
-//     CHECK:   %[[LOADPTR:.+]] = spv.AccessChain %[[PTR]]
-//     CHECK:   %[[VAL:.+]] = spv.Load "Workgroup" %[[LOADPTR]] : f32
-//     CHECK:   %[[STOREPTR:.+]] = spv.AccessChain %[[PTR]]
-//     CHECK:   spv.Store "Workgroup" %[[STOREPTR]], %[[VAL]] : f32
+//     CHECK:   %[[PTR:.+]] = spirv.mlir.addressof @[[VAR]]
+//     CHECK:   %[[LOADPTR:.+]] = spirv.AccessChain %[[PTR]]
+//     CHECK:   %[[VAL:.+]] = spirv.Load "Workgroup" %[[LOADPTR]] : f32
+//     CHECK:   %[[STOREPTR:.+]] = spirv.AccessChain %[[PTR]]
+//     CHECK:   spirv.Store "Workgroup" %[[STOREPTR]], %[[VAL]] : f32
 // CHECK-NOT:   memref.dealloc
 
 // -----
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
   }
 {
   func.func @alloc_dealloc_workgroup_mem(%arg0 : index, %arg1 : index) {
-    %0 = memref.alloc() : memref<4x5xi16, #spv.storage_class<Workgroup>>
-    %1 = memref.load %0[%arg0, %arg1] : memref<4x5xi16, #spv.storage_class<Workgroup>>
-    memref.store %1, %0[%arg0, %arg1] : memref<4x5xi16, #spv.storage_class<Workgroup>>
-    memref.dealloc %0 : memref<4x5xi16, #spv.storage_class<Workgroup>>
+    %0 = memref.alloc() : memref<4x5xi16, #spirv.storage_class<Workgroup>>
+    %1 = memref.load %0[%arg0, %arg1] : memref<4x5xi16, #spirv.storage_class<Workgroup>>
+    memref.store %1, %0[%arg0, %arg1] : memref<4x5xi16, #spirv.storage_class<Workgroup>>
+    memref.dealloc %0 : memref<4x5xi16, #spirv.storage_class<Workgroup>>
     return
   }
 }
 
-//       CHECK: spv.GlobalVariable @__workgroup_mem__{{[0-9]+}}
-//  CHECK-SAME:   !spv.ptr<!spv.struct<(!spv.array<10 x i32>)>, Workgroup>
+//       CHECK: spirv.GlobalVariable @__workgroup_mem__{{[0-9]+}}
+//  CHECK-SAME:   !spirv.ptr<!spirv.struct<(!spirv.array<10 x i32>)>, Workgroup>
 //       CHECK: func @alloc_dealloc_workgroup_mem
-//       CHECK:   %[[VAR:.+]] = spv.mlir.addressof @__workgroup_mem__0
-//       CHECK:   %[[LOC:.+]] = spv.SDiv
-//       CHECK:   %[[PTR:.+]] = spv.AccessChain %[[VAR]][%{{.+}}, %[[LOC]]]
-//       CHECK:   %{{.+}} = spv.Load "Workgroup" %[[PTR]] : i32
-//       CHECK:   %[[LOC:.+]] = spv.SDiv
-//       CHECK:   %[[PTR:.+]] = spv.AccessChain %[[VAR]][%{{.+}}, %[[LOC]]]
-//       CHECK:   %{{.+}} = spv.AtomicAnd "Workgroup" "AcquireRelease" %[[PTR]], %{{.+}} : !spv.ptr<i32, Workgroup>
-//       CHECK:   %{{.+}} = spv.AtomicOr "Workgroup" "AcquireRelease" %[[PTR]], %{{.+}} : !spv.ptr<i32, Workgroup>
+//       CHECK:   %[[VAR:.+]] = spirv.mlir.addressof @__workgroup_mem__0
+//       CHECK:   %[[LOC:.+]] = spirv.SDiv
+//       CHECK:   %[[PTR:.+]] = spirv.AccessChain %[[VAR]][%{{.+}}, %[[LOC]]]
+//       CHECK:   %{{.+}} = spirv.Load "Workgroup" %[[PTR]] : i32
+//       CHECK:   %[[LOC:.+]] = spirv.SDiv
+//       CHECK:   %[[PTR:.+]] = spirv.AccessChain %[[VAR]][%{{.+}}, %[[LOC]]]
+//       CHECK:   %{{.+}} = spirv.AtomicAnd "Workgroup" "AcquireRelease" %[[PTR]], %{{.+}} : !spirv.ptr<i32, Workgroup>
+//       CHECK:   %{{.+}} = spirv.AtomicOr "Workgroup" "AcquireRelease" %[[PTR]], %{{.+}} : !spirv.ptr<i32, Workgroup>
 
 
 // -----
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
   }
 {
   func.func @two_allocs() {
-    %0 = memref.alloc() : memref<4x5xf32, #spv.storage_class<Workgroup>>
-    %1 = memref.alloc() : memref<2x3xi32, #spv.storage_class<Workgroup>>
+    %0 = memref.alloc() : memref<4x5xf32, #spirv.storage_class<Workgroup>>
+    %1 = memref.alloc() : memref<2x3xi32, #spirv.storage_class<Workgroup>>
     return
   }
 }
 
-//  CHECK-DAG: spv.GlobalVariable @__workgroup_mem__{{[0-9]+}}
-// CHECK-SAME:   !spv.ptr<!spv.struct<(!spv.array<6 x i32>)>, Workgroup>
-//  CHECK-DAG: spv.GlobalVariable @__workgroup_mem__{{[0-9]+}}
-// CHECK-SAME:   !spv.ptr<!spv.struct<(!spv.array<20 x f32>)>, Workgroup>
+//  CHECK-DAG: spirv.GlobalVariable @__workgroup_mem__{{[0-9]+}}
+// CHECK-SAME:   !spirv.ptr<!spirv.struct<(!spirv.array<6 x i32>)>, Workgroup>
+//  CHECK-DAG: spirv.GlobalVariable @__workgroup_mem__{{[0-9]+}}
+// CHECK-SAME:   !spirv.ptr<!spirv.struct<(!spirv.array<20 x f32>)>, Workgroup>
 //      CHECK: func @two_allocs()
 
 // -----
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
   }
 {
   func.func @two_allocs_vector() {
-    %0 = memref.alloc() : memref<4xvector<4xf32>, #spv.storage_class<Workgroup>>
-    %1 = memref.alloc() : memref<2xvector<2xi32>, #spv.storage_class<Workgroup>>
+    %0 = memref.alloc() : memref<4xvector<4xf32>, #spirv.storage_class<Workgroup>>
+    %1 = memref.alloc() : memref<2xvector<2xi32>, #spirv.storage_class<Workgroup>>
     return
   }
 }
 
-//  CHECK-DAG: spv.GlobalVariable @__workgroup_mem__{{[0-9]+}}
-// CHECK-SAME:   !spv.ptr<!spv.struct<(!spv.array<2 x vector<2xi32>>)>, Workgroup>
-//  CHECK-DAG: spv.GlobalVariable @__workgroup_mem__{{[0-9]+}}
-// CHECK-SAME:   !spv.ptr<!spv.struct<(!spv.array<4 x vector<4xf32>>)>, Workgroup>
+//  CHECK-DAG: spirv.GlobalVariable @__workgroup_mem__{{[0-9]+}}
+// CHECK-SAME:   !spirv.ptr<!spirv.struct<(!spirv.array<2 x vector<2xi32>>)>, Workgroup>
+//  CHECK-DAG: spirv.GlobalVariable @__workgroup_mem__{{[0-9]+}}
+// CHECK-SAME:   !spirv.ptr<!spirv.struct<(!spirv.array<4 x vector<4xf32>>)>, Workgroup>
 //      CHECK: func @two_allocs_vector()
 
 
 // -----
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
   }
 {
   // CHECK-LABEL: func @alloc_dynamic_size
   func.func @alloc_dynamic_size(%arg0 : index) -> f32 {
     // CHECK: memref.alloc
-    %0 = memref.alloc(%arg0) : memref<4x?xf32, #spv.storage_class<Workgroup>>
-    %1 = memref.load %0[%arg0, %arg0] : memref<4x?xf32, #spv.storage_class<Workgroup>>
+    %0 = memref.alloc(%arg0) : memref<4x?xf32, #spirv.storage_class<Workgroup>>
+    %1 = memref.load %0[%arg0, %arg0] : memref<4x?xf32, #spirv.storage_class<Workgroup>>
     return %1: f32
   }
 }
@@ -112,15 +112,15 @@ module attributes {
 // -----
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
   }
 {
   // CHECK-LABEL: func @alloc_unsupported_memory_space
   func.func @alloc_unsupported_memory_space(%arg0: index) -> f32 {
     // CHECK: memref.alloc
-    %0 = memref.alloc() : memref<4x5xf32, #spv.storage_class<StorageBuffer>>
-    %1 = memref.load %0[%arg0, %arg0] : memref<4x5xf32, #spv.storage_class<StorageBuffer>>
+    %0 = memref.alloc() : memref<4x5xf32, #spirv.storage_class<StorageBuffer>>
+    %1 = memref.load %0[%arg0, %arg0] : memref<4x5xf32, #spirv.storage_class<StorageBuffer>>
     return %1: f32
   }
 }
@@ -129,14 +129,14 @@ module attributes {
 // -----
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
   }
 {
   // CHECK-LABEL: func @dealloc_dynamic_size
-  func.func @dealloc_dynamic_size(%arg0 : memref<4x?xf32, #spv.storage_class<Workgroup>>) {
+  func.func @dealloc_dynamic_size(%arg0 : memref<4x?xf32, #spirv.storage_class<Workgroup>>) {
     // CHECK: memref.dealloc
-    memref.dealloc %arg0 : memref<4x?xf32, #spv.storage_class<Workgroup>>
+    memref.dealloc %arg0 : memref<4x?xf32, #spirv.storage_class<Workgroup>>
     return
   }
 }
@@ -144,14 +144,14 @@ module attributes {
 // -----
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
   }
 {
   // CHECK-LABEL: func @dealloc_unsupported_memory_space
-  func.func @dealloc_unsupported_memory_space(%arg0 : memref<4x5xf32, #spv.storage_class<StorageBuffer>>) {
+  func.func @dealloc_unsupported_memory_space(%arg0 : memref<4x5xf32, #spirv.storage_class<StorageBuffer>>) {
     // CHECK: memref.dealloc
-    memref.dealloc %arg0 : memref<4x5xf32, #spv.storage_class<StorageBuffer>>
+    memref.dealloc %arg0 : memref<4x5xf32, #spirv.storage_class<StorageBuffer>>
     return
   }
 }

diff  --git a/mlir/test/Conversion/MemRefToSPIRV/alloca.mlir b/mlir/test/Conversion/MemRefToSPIRV/alloca.mlir
index 80081280eb41..fb270235adf5 100644
--- a/mlir/test/Conversion/MemRefToSPIRV/alloca.mlir
+++ b/mlir/test/Conversion/MemRefToSPIRV/alloca.mlir
@@ -1,66 +1,66 @@
 // RUN: mlir-opt -split-input-file -convert-memref-to-spirv -canonicalize -verify-diagnostics %s -o - | FileCheck %s
 
-module attributes {spv.target_env = #spv.target_env<#spv.vce<v1.3, [Shader], []>, #spv.resource_limits<>>} {
+module attributes {spirv.target_env = #spirv.target_env<#spirv.vce<v1.3, [Shader], []>, #spirv.resource_limits<>>} {
   func.func @alloc_function_variable(%arg0 : index, %arg1 : index) {
-    %0 = memref.alloca() : memref<4x5xf32, #spv.storage_class<Function>>
-    %1 = memref.load %0[%arg0, %arg1] : memref<4x5xf32, #spv.storage_class<Function>>
-    memref.store %1, %0[%arg0, %arg1] : memref<4x5xf32, #spv.storage_class<Function>>
+    %0 = memref.alloca() : memref<4x5xf32, #spirv.storage_class<Function>>
+    %1 = memref.load %0[%arg0, %arg1] : memref<4x5xf32, #spirv.storage_class<Function>>
+    memref.store %1, %0[%arg0, %arg1] : memref<4x5xf32, #spirv.storage_class<Function>>
     return
   }
 }
 
 // CHECK-LABEL: func @alloc_function_variable
-//       CHECK:   %[[VAR:.+]] = spv.Variable : !spv.ptr<!spv.struct<(!spv.array<20 x f32>)>, Function>
-//       CHECK:   %[[LOADPTR:.+]] = spv.AccessChain %[[VAR]]
-//       CHECK:   %[[VAL:.+]] = spv.Load "Function" %[[LOADPTR]] : f32
-//       CHECK:   %[[STOREPTR:.+]] = spv.AccessChain %[[VAR]]
-//       CHECK:   spv.Store "Function" %[[STOREPTR]], %[[VAL]] : f32
+//       CHECK:   %[[VAR:.+]] = spirv.Variable : !spirv.ptr<!spirv.struct<(!spirv.array<20 x f32>)>, Function>
+//       CHECK:   %[[LOADPTR:.+]] = spirv.AccessChain %[[VAR]]
+//       CHECK:   %[[VAL:.+]] = spirv.Load "Function" %[[LOADPTR]] : f32
+//       CHECK:   %[[STOREPTR:.+]] = spirv.AccessChain %[[VAR]]
+//       CHECK:   spirv.Store "Function" %[[STOREPTR]], %[[VAL]] : f32
 
 
 // -----
 
-module attributes {spv.target_env = #spv.target_env<#spv.vce<v1.3, [Shader], []>, #spv.resource_limits<>>} {
+module attributes {spirv.target_env = #spirv.target_env<#spirv.vce<v1.3, [Shader], []>, #spirv.resource_limits<>>} {
   func.func @two_allocs() {
-    %0 = memref.alloca() : memref<4x5xf32, #spv.storage_class<Function>>
-    %1 = memref.alloca() : memref<2x3xi32, #spv.storage_class<Function>>
+    %0 = memref.alloca() : memref<4x5xf32, #spirv.storage_class<Function>>
+    %1 = memref.alloca() : memref<2x3xi32, #spirv.storage_class<Function>>
     return
   }
 }
 
 // CHECK-LABEL: func @two_allocs
-//   CHECK-DAG: spv.Variable : !spv.ptr<!spv.struct<(!spv.array<6 x i32>)>, Function>
-//   CHECK-DAG: spv.Variable : !spv.ptr<!spv.struct<(!spv.array<20 x f32>)>, Function>
+//   CHECK-DAG: spirv.Variable : !spirv.ptr<!spirv.struct<(!spirv.array<6 x i32>)>, Function>
+//   CHECK-DAG: spirv.Variable : !spirv.ptr<!spirv.struct<(!spirv.array<20 x f32>)>, Function>
 
 // -----
 
-module attributes {spv.target_env = #spv.target_env<#spv.vce<v1.3, [Shader], []>, #spv.resource_limits<>>} {
+module attributes {spirv.target_env = #spirv.target_env<#spirv.vce<v1.3, [Shader], []>, #spirv.resource_limits<>>} {
   func.func @two_allocs_vector() {
-    %0 = memref.alloca() : memref<4xvector<4xf32>, #spv.storage_class<Function>>
-    %1 = memref.alloca() : memref<2xvector<2xi32>, #spv.storage_class<Function>>
+    %0 = memref.alloca() : memref<4xvector<4xf32>, #spirv.storage_class<Function>>
+    %1 = memref.alloca() : memref<2xvector<2xi32>, #spirv.storage_class<Function>>
     return
   }
 }
 
 // CHECK-LABEL: func @two_allocs_vector
-//   CHECK-DAG: spv.Variable : !spv.ptr<!spv.struct<(!spv.array<2 x vector<2xi32>>)>, Function>
-//   CHECK-DAG: spv.Variable : !spv.ptr<!spv.struct<(!spv.array<4 x vector<4xf32>>)>, Function>
+//   CHECK-DAG: spirv.Variable : !spirv.ptr<!spirv.struct<(!spirv.array<2 x vector<2xi32>>)>, Function>
+//   CHECK-DAG: spirv.Variable : !spirv.ptr<!spirv.struct<(!spirv.array<4 x vector<4xf32>>)>, Function>
 
 
 // -----
 
-module attributes {spv.target_env = #spv.target_env<#spv.vce<v1.3, [Shader], []>, #spv.resource_limits<>>} {
+module attributes {spirv.target_env = #spirv.target_env<#spirv.vce<v1.3, [Shader], []>, #spirv.resource_limits<>>} {
   // CHECK-LABEL: func @alloc_dynamic_size
   func.func @alloc_dynamic_size(%arg0 : index) -> f32 {
     // CHECK: memref.alloca
-    %0 = memref.alloca(%arg0) : memref<4x?xf32, #spv.storage_class<Function>>
-    %1 = memref.load %0[%arg0, %arg0] : memref<4x?xf32, #spv.storage_class<Function>>
+    %0 = memref.alloca(%arg0) : memref<4x?xf32, #spirv.storage_class<Function>>
+    %1 = memref.load %0[%arg0, %arg0] : memref<4x?xf32, #spirv.storage_class<Function>>
     return %1: f32
   }
 }
 
 // -----
 
-module attributes {spv.target_env = #spv.target_env<#spv.vce<v1.3, [Shader], []>, #spv.resource_limits<>>} {
+module attributes {spirv.target_env = #spirv.target_env<#spirv.vce<v1.3, [Shader], []>, #spirv.resource_limits<>>} {
   // CHECK-LABEL: func @alloc_unsupported_memory_space
   func.func @alloc_unsupported_memory_space(%arg0: index) -> f32 {
     // CHECK: memref.alloca

diff  --git a/mlir/test/Conversion/MemRefToSPIRV/map-storage-class.mlir b/mlir/test/Conversion/MemRefToSPIRV/map-storage-class.mlir
index 9b10e8d017ed..f0956b62760a 100644
--- a/mlir/test/Conversion/MemRefToSPIRV/map-storage-class.mlir
+++ b/mlir/test/Conversion/MemRefToSPIRV/map-storage-class.mlir
@@ -18,29 +18,29 @@
 // VULKAN-LABEL: func @operand_result
 // OPENCL-LABEL: func @operand_result
 func.func @operand_result() {
-  // VULKAN: memref<f32, #spv.storage_class<StorageBuffer>>
-  // OPENCL: memref<f32, #spv.storage_class<CrossWorkgroup>>
+  // VULKAN: memref<f32, #spirv.storage_class<StorageBuffer>>
+  // OPENCL: memref<f32, #spirv.storage_class<CrossWorkgroup>>
   %0 = "dialect.memref_producer"() : () -> (memref<f32>)
-  // VULKAN: memref<4xi32, #spv.storage_class<Generic>>
-  // OPENCL: memref<4xi32, #spv.storage_class<Generic>>
+  // VULKAN: memref<4xi32, #spirv.storage_class<Generic>>
+  // OPENCL: memref<4xi32, #spirv.storage_class<Generic>>
   %1 = "dialect.memref_producer"() : () -> (memref<4xi32, 1>)
-  // VULKAN: memref<?x4xf16, #spv.storage_class<Workgroup>>
-  // OPENCL: memref<?x4xf16, #spv.storage_class<Workgroup>>
+  // VULKAN: memref<?x4xf16, #spirv.storage_class<Workgroup>>
+  // OPENCL: memref<?x4xf16, #spirv.storage_class<Workgroup>>
   %2 = "dialect.memref_producer"() : () -> (memref<?x4xf16, 3>)
-  // VULKAN: memref<*xf16, #spv.storage_class<Uniform>>
-  // OPENCL: memref<*xf16, #spv.storage_class<UniformConstant>>
+  // VULKAN: memref<*xf16, #spirv.storage_class<Uniform>>
+  // OPENCL: memref<*xf16, #spirv.storage_class<UniformConstant>>
   %3 = "dialect.memref_producer"() : () -> (memref<*xf16, 4>)
 
 
   "dialect.memref_consumer"(%0) : (memref<f32>) -> ()
-  // VULKAN: memref<4xi32, #spv.storage_class<Generic>>
-  // OPENCL: memref<4xi32, #spv.storage_class<Generic>>
+  // VULKAN: memref<4xi32, #spirv.storage_class<Generic>>
+  // OPENCL: memref<4xi32, #spirv.storage_class<Generic>>
   "dialect.memref_consumer"(%1) : (memref<4xi32, 1>) -> ()
-  // VULKAN: memref<?x4xf16, #spv.storage_class<Workgroup>>
-  // OPENCL: memref<?x4xf16, #spv.storage_class<Workgroup>>
+  // VULKAN: memref<?x4xf16, #spirv.storage_class<Workgroup>>
+  // OPENCL: memref<?x4xf16, #spirv.storage_class<Workgroup>>
   "dialect.memref_consumer"(%2) : (memref<?x4xf16, 3>) -> ()
-  // VULKAN: memref<*xf16, #spv.storage_class<Uniform>>
-  // OPENCL: memref<*xf16, #spv.storage_class<UniformConstant>>
+  // VULKAN: memref<*xf16, #spirv.storage_class<Uniform>>
+  // OPENCL: memref<*xf16, #spirv.storage_class<UniformConstant>>
   "dialect.memref_consumer"(%3) : (memref<*xf16, 4>) -> ()
 
   return
@@ -51,8 +51,8 @@ func.func @operand_result() {
 // VULKAN-LABEL: func @type_attribute
 // OPENCL-LABEL: func @type_attribute
 func.func @type_attribute() {
-  // VULKAN: attr = memref<i32, #spv.storage_class<Generic>>
-  // OPENCL: attr = memref<i32, #spv.storage_class<Generic>>
+  // VULKAN: attr = memref<i32, #spirv.storage_class<Generic>>
+  // OPENCL: attr = memref<i32, #spirv.storage_class<Generic>>
   "dialect.memref_producer"() { attr = memref<i32, 1> } : () -> ()
   return
 }
@@ -62,11 +62,11 @@ func.func @type_attribute() {
 // VULKAN-LABEL: func.func @function_io
 // OPENCL-LABEL: func.func @function_io
 func.func @function_io
-  // VULKAN-SAME: (%{{.+}}: memref<f64, #spv.storage_class<Generic>>, %{{.+}}: memref<4xi32, #spv.storage_class<Workgroup>>)
-  // OPENCL-SAME: (%{{.+}}: memref<f64, #spv.storage_class<Generic>>, %{{.+}}: memref<4xi32, #spv.storage_class<Workgroup>>)
+  // VULKAN-SAME: (%{{.+}}: memref<f64, #spirv.storage_class<Generic>>, %{{.+}}: memref<4xi32, #spirv.storage_class<Workgroup>>)
+  // OPENCL-SAME: (%{{.+}}: memref<f64, #spirv.storage_class<Generic>>, %{{.+}}: memref<4xi32, #spirv.storage_class<Workgroup>>)
   (%arg0: memref<f64, 1>, %arg1: memref<4xi32, 3>)
-  // VULKAN-SAME: -> (memref<f64, #spv.storage_class<Generic>>, memref<4xi32, #spv.storage_class<Workgroup>>)
-  // OPENCL-SAME: -> (memref<f64, #spv.storage_class<Generic>>, memref<4xi32, #spv.storage_class<Workgroup>>)
+  // VULKAN-SAME: -> (memref<f64, #spirv.storage_class<Generic>>, memref<4xi32, #spirv.storage_class<Workgroup>>)
+  // OPENCL-SAME: -> (memref<f64, #spirv.storage_class<Generic>>, memref<4xi32, #spirv.storage_class<Workgroup>>)
   -> (memref<f64, 1>, memref<4xi32, 3>) {
   return %arg0, %arg1: memref<f64, 1>, memref<4xi32, 3>
 }
@@ -76,8 +76,8 @@ func.func @function_io
 gpu.module @kernel {
 // VULKAN-LABEL: gpu.func @function_io
 // OPENCL-LABEL: gpu.func @function_io
-// VULKAN-SAME: memref<8xi32, #spv.storage_class<StorageBuffer>>
-// OPENCL-SAME: memref<8xi32, #spv.storage_class<CrossWorkgroup>>
+// VULKAN-SAME: memref<8xi32, #spirv.storage_class<StorageBuffer>>
+// OPENCL-SAME: memref<8xi32, #spirv.storage_class<CrossWorkgroup>>
 gpu.func @function_io(%arg0 : memref<8xi32>) kernel { gpu.return }
 }
 
@@ -87,10 +87,10 @@ gpu.func @function_io(%arg0 : memref<8xi32>) kernel { gpu.return }
 // OPENCL-LABEL: func.func @region
 func.func @region(%cond: i1, %arg0: memref<f32, 1>) {
   scf.if %cond {
-    //      VULKAN: "dialect.memref_consumer"(%{{.+}}) {attr = memref<i64, #spv.storage_class<Workgroup>>}
-    //      OPENCL: "dialect.memref_consumer"(%{{.+}}) {attr = memref<i64, #spv.storage_class<Workgroup>>}
-    // VULKAN-SAME: (memref<f32, #spv.storage_class<Generic>>) -> memref<f32, #spv.storage_class<Generic>>
-    // OPENCL-SAME: (memref<f32, #spv.storage_class<Generic>>) -> memref<f32, #spv.storage_class<Generic>>
+    //      VULKAN: "dialect.memref_consumer"(%{{.+}}) {attr = memref<i64, #spirv.storage_class<Workgroup>>}
+    //      OPENCL: "dialect.memref_consumer"(%{{.+}}) {attr = memref<i64, #spirv.storage_class<Workgroup>>}
+    // VULKAN-SAME: (memref<f32, #spirv.storage_class<Generic>>) -> memref<f32, #spirv.storage_class<Generic>>
+    // OPENCL-SAME: (memref<f32, #spirv.storage_class<Generic>>) -> memref<f32, #spirv.storage_class<Generic>>
     %0 = "dialect.memref_consumer"(%arg0) { attr = memref<i64, 3> } : (memref<f32, 1>) -> (memref<f32, 1>)
   }
   return
@@ -118,24 +118,24 @@ func.func @missing_mapping() {
 // -----
 
 /// Checks memory maps to OpenCL mapping if Kernel capability is enabled.
-module attributes { spv.target_env = #spv.target_env<#spv.vce<v1.0, [Kernel], []>, #spv.resource_limits<>> } {
+module attributes { spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Kernel], []>, #spirv.resource_limits<>> } {
 func.func @operand_result() {
-  // CHECK: memref<f32, #spv.storage_class<CrossWorkgroup>>
+  // CHECK: memref<f32, #spirv.storage_class<CrossWorkgroup>>
   %0 = "dialect.memref_producer"() : () -> (memref<f32>)
-  // CHECK: memref<4xi32, #spv.storage_class<Generic>>
+  // CHECK: memref<4xi32, #spirv.storage_class<Generic>>
   %1 = "dialect.memref_producer"() : () -> (memref<4xi32, 1>)
-  // CHECK: memref<?x4xf16, #spv.storage_class<Workgroup>>
+  // CHECK: memref<?x4xf16, #spirv.storage_class<Workgroup>>
   %2 = "dialect.memref_producer"() : () -> (memref<?x4xf16, 3>)
-  // CHECK: memref<*xf16, #spv.storage_class<UniformConstant>>
+  // CHECK: memref<*xf16, #spirv.storage_class<UniformConstant>>
   %3 = "dialect.memref_producer"() : () -> (memref<*xf16, 4>)
 
 
   "dialect.memref_consumer"(%0) : (memref<f32>) -> ()
-  // CHECK: memref<4xi32, #spv.storage_class<Generic>>
+  // CHECK: memref<4xi32, #spirv.storage_class<Generic>>
   "dialect.memref_consumer"(%1) : (memref<4xi32, 1>) -> ()
-  // CHECK: memref<?x4xf16, #spv.storage_class<Workgroup>>
+  // CHECK: memref<?x4xf16, #spirv.storage_class<Workgroup>>
   "dialect.memref_consumer"(%2) : (memref<?x4xf16, 3>) -> ()
-  // CHECK: memref<*xf16, #spv.storage_class<UniformConstant>>
+  // CHECK: memref<*xf16, #spirv.storage_class<UniformConstant>>
   "dialect.memref_consumer"(%3) : (memref<*xf16, 4>) -> ()
 
   return
@@ -145,24 +145,24 @@ func.func @operand_result() {
 // -----
 
 /// Checks memory maps to Vulkan mapping if Shader capability is enabled.
-module attributes { spv.target_env = #spv.target_env<#spv.vce<v1.0, [Shader], []>, #spv.resource_limits<>> } {
+module attributes { spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Shader], []>, #spirv.resource_limits<>> } {
 func.func @operand_result() {
-  // CHECK: memref<f32, #spv.storage_class<StorageBuffer>>
+  // CHECK: memref<f32, #spirv.storage_class<StorageBuffer>>
   %0 = "dialect.memref_producer"() : () -> (memref<f32>)
-  // CHECK: memref<4xi32, #spv.storage_class<Generic>>
+  // CHECK: memref<4xi32, #spirv.storage_class<Generic>>
   %1 = "dialect.memref_producer"() : () -> (memref<4xi32, 1>)
-  // CHECK: memref<?x4xf16, #spv.storage_class<Workgroup>>
+  // CHECK: memref<?x4xf16, #spirv.storage_class<Workgroup>>
   %2 = "dialect.memref_producer"() : () -> (memref<?x4xf16, 3>)
-  // CHECK: memref<*xf16, #spv.storage_class<Uniform>>
+  // CHECK: memref<*xf16, #spirv.storage_class<Uniform>>
   %3 = "dialect.memref_producer"() : () -> (memref<*xf16, 4>)
 
 
   "dialect.memref_consumer"(%0) : (memref<f32>) -> ()
-  // CHECK: memref<4xi32, #spv.storage_class<Generic>>
+  // CHECK: memref<4xi32, #spirv.storage_class<Generic>>
   "dialect.memref_consumer"(%1) : (memref<4xi32, 1>) -> ()
-  // CHECK: memref<?x4xf16, #spv.storage_class<Workgroup>>
+  // CHECK: memref<?x4xf16, #spirv.storage_class<Workgroup>>
   "dialect.memref_consumer"(%2) : (memref<?x4xf16, 3>) -> ()
-  // CHECK: memref<*xf16, #spv.storage_class<Uniform>>
+  // CHECK: memref<*xf16, #spirv.storage_class<Uniform>>
   "dialect.memref_consumer"(%3) : (memref<*xf16, 4>) -> ()
   return
 }

diff  --git a/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir b/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir
index 98e44b958e8e..e3ddb7398055 100644
--- a/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir
+++ b/mlir/test/Conversion/MemRefToSPIRV/memref-to-spirv.mlir
@@ -4,104 +4,104 @@
 // perform special tricks.
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0,
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0,
       [
         Shader, Int8, Int16, Int64, Float16, Float64,
         StorageBuffer16BitAccess, StorageUniform16, StoragePushConstant16,
         StorageBuffer8BitAccess, UniformAndStorageBuffer8BitAccess, StoragePushConstant8
       ],
-      [SPV_KHR_16bit_storage, SPV_KHR_8bit_storage, SPV_KHR_storage_buffer_storage_class]>, #spv.resource_limits<>>
+      [SPV_KHR_16bit_storage, SPV_KHR_8bit_storage, SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @load_store_zero_rank_float
-func.func @load_store_zero_rank_float(%arg0: memref<f32, #spv.storage_class<StorageBuffer>>, %arg1: memref<f32, #spv.storage_class<StorageBuffer>>) {
-  //      CHECK: [[ARG0:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<f32, #spv.storage_class<StorageBuffer>> to !spv.ptr<!spv.struct<(!spv.array<1 x f32, stride=4> [0])>, StorageBuffer>
-  //      CHECK: [[ARG1:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<f32, #spv.storage_class<StorageBuffer>> to !spv.ptr<!spv.struct<(!spv.array<1 x f32, stride=4> [0])>, StorageBuffer>
-  //      CHECK: [[ZERO1:%.*]] = spv.Constant 0 : i32
-  //      CHECK: spv.AccessChain [[ARG0]][
+func.func @load_store_zero_rank_float(%arg0: memref<f32, #spirv.storage_class<StorageBuffer>>, %arg1: memref<f32, #spirv.storage_class<StorageBuffer>>) {
+  //      CHECK: [[ARG0:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<f32, #spirv.storage_class<StorageBuffer>> to !spirv.ptr<!spirv.struct<(!spirv.array<1 x f32, stride=4> [0])>, StorageBuffer>
+  //      CHECK: [[ARG1:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<f32, #spirv.storage_class<StorageBuffer>> to !spirv.ptr<!spirv.struct<(!spirv.array<1 x f32, stride=4> [0])>, StorageBuffer>
+  //      CHECK: [[ZERO1:%.*]] = spirv.Constant 0 : i32
+  //      CHECK: spirv.AccessChain [[ARG0]][
   // CHECK-SAME: [[ZERO1]], [[ZERO1]]
   // CHECK-SAME: ] :
-  //      CHECK: spv.Load "StorageBuffer" %{{.*}} : f32
-  %0 = memref.load %arg0[] : memref<f32, #spv.storage_class<StorageBuffer>>
-  //      CHECK: [[ZERO2:%.*]] = spv.Constant 0 : i32
-  //      CHECK: spv.AccessChain [[ARG1]][
+  //      CHECK: spirv.Load "StorageBuffer" %{{.*}} : f32
+  %0 = memref.load %arg0[] : memref<f32, #spirv.storage_class<StorageBuffer>>
+  //      CHECK: [[ZERO2:%.*]] = spirv.Constant 0 : i32
+  //      CHECK: spirv.AccessChain [[ARG1]][
   // CHECK-SAME: [[ZERO2]], [[ZERO2]]
   // CHECK-SAME: ] :
-  //      CHECK: spv.Store "StorageBuffer" %{{.*}} : f32
-  memref.store %0, %arg1[] : memref<f32, #spv.storage_class<StorageBuffer>>
+  //      CHECK: spirv.Store "StorageBuffer" %{{.*}} : f32
+  memref.store %0, %arg1[] : memref<f32, #spirv.storage_class<StorageBuffer>>
   return
 }
 
 // CHECK-LABEL: @load_store_zero_rank_int
-func.func @load_store_zero_rank_int(%arg0: memref<i32, #spv.storage_class<StorageBuffer>>, %arg1: memref<i32, #spv.storage_class<StorageBuffer>>) {
-  //      CHECK: [[ARG0:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<i32, #spv.storage_class<StorageBuffer>> to !spv.ptr<!spv.struct<(!spv.array<1 x i32, stride=4> [0])>, StorageBuffer>
-  //      CHECK: [[ARG1:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<i32, #spv.storage_class<StorageBuffer>> to !spv.ptr<!spv.struct<(!spv.array<1 x i32, stride=4> [0])>, StorageBuffer>
-  //      CHECK: [[ZERO1:%.*]] = spv.Constant 0 : i32
-  //      CHECK: spv.AccessChain [[ARG0]][
+func.func @load_store_zero_rank_int(%arg0: memref<i32, #spirv.storage_class<StorageBuffer>>, %arg1: memref<i32, #spirv.storage_class<StorageBuffer>>) {
+  //      CHECK: [[ARG0:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<i32, #spirv.storage_class<StorageBuffer>> to !spirv.ptr<!spirv.struct<(!spirv.array<1 x i32, stride=4> [0])>, StorageBuffer>
+  //      CHECK: [[ARG1:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<i32, #spirv.storage_class<StorageBuffer>> to !spirv.ptr<!spirv.struct<(!spirv.array<1 x i32, stride=4> [0])>, StorageBuffer>
+  //      CHECK: [[ZERO1:%.*]] = spirv.Constant 0 : i32
+  //      CHECK: spirv.AccessChain [[ARG0]][
   // CHECK-SAME: [[ZERO1]], [[ZERO1]]
   // CHECK-SAME: ] :
-  //      CHECK: spv.Load "StorageBuffer" %{{.*}} : i32
-  %0 = memref.load %arg0[] : memref<i32, #spv.storage_class<StorageBuffer>>
-  //      CHECK: [[ZERO2:%.*]] = spv.Constant 0 : i32
-  //      CHECK: spv.AccessChain [[ARG1]][
+  //      CHECK: spirv.Load "StorageBuffer" %{{.*}} : i32
+  %0 = memref.load %arg0[] : memref<i32, #spirv.storage_class<StorageBuffer>>
+  //      CHECK: [[ZERO2:%.*]] = spirv.Constant 0 : i32
+  //      CHECK: spirv.AccessChain [[ARG1]][
   // CHECK-SAME: [[ZERO2]], [[ZERO2]]
   // CHECK-SAME: ] :
-  //      CHECK: spv.Store "StorageBuffer" %{{.*}} : i32
-  memref.store %0, %arg1[] : memref<i32, #spv.storage_class<StorageBuffer>>
+  //      CHECK: spirv.Store "StorageBuffer" %{{.*}} : i32
+  memref.store %0, %arg1[] : memref<i32, #spirv.storage_class<StorageBuffer>>
   return
 }
 
 // CHECK-LABEL: func @load_store_unknown_dim
-func.func @load_store_unknown_dim(%i: index, %source: memref<?xi32, #spv.storage_class<StorageBuffer>>, %dest: memref<?xi32, #spv.storage_class<StorageBuffer>>) {
-  // CHECK: %[[SRC:.+]] = builtin.unrealized_conversion_cast {{.+}} : memref<?xi32, #spv.storage_class<StorageBuffer>> to !spv.ptr<!spv.struct<(!spv.rtarray<i32, stride=4> [0])>, StorageBuffer>
-  // CHECK: %[[DST:.+]] = builtin.unrealized_conversion_cast {{.+}} : memref<?xi32, #spv.storage_class<StorageBuffer>> to !spv.ptr<!spv.struct<(!spv.rtarray<i32, stride=4> [0])>, StorageBuffer>
-  // CHECK: %[[AC0:.+]] = spv.AccessChain %[[SRC]]
-  // CHECK: spv.Load "StorageBuffer" %[[AC0]]
-  %0 = memref.load %source[%i] : memref<?xi32, #spv.storage_class<StorageBuffer>>
-  // CHECK: %[[AC1:.+]] = spv.AccessChain %[[DST]]
-  // CHECK: spv.Store "StorageBuffer" %[[AC1]]
-  memref.store %0, %dest[%i]: memref<?xi32, #spv.storage_class<StorageBuffer>>
+func.func @load_store_unknown_dim(%i: index, %source: memref<?xi32, #spirv.storage_class<StorageBuffer>>, %dest: memref<?xi32, #spirv.storage_class<StorageBuffer>>) {
+  // CHECK: %[[SRC:.+]] = builtin.unrealized_conversion_cast {{.+}} : memref<?xi32, #spirv.storage_class<StorageBuffer>> to !spirv.ptr<!spirv.struct<(!spirv.rtarray<i32, stride=4> [0])>, StorageBuffer>
+  // CHECK: %[[DST:.+]] = builtin.unrealized_conversion_cast {{.+}} : memref<?xi32, #spirv.storage_class<StorageBuffer>> to !spirv.ptr<!spirv.struct<(!spirv.rtarray<i32, stride=4> [0])>, StorageBuffer>
+  // CHECK: %[[AC0:.+]] = spirv.AccessChain %[[SRC]]
+  // CHECK: spirv.Load "StorageBuffer" %[[AC0]]
+  %0 = memref.load %source[%i] : memref<?xi32, #spirv.storage_class<StorageBuffer>>
+  // CHECK: %[[AC1:.+]] = spirv.AccessChain %[[DST]]
+  // CHECK: spirv.Store "StorageBuffer" %[[AC1]]
+  memref.store %0, %dest[%i]: memref<?xi32, #spirv.storage_class<StorageBuffer>>
   return
 }
 
 // CHECK-LABEL: func @load_i1
-//  CHECK-SAME: (%[[SRC:.+]]: memref<4xi1, #spv.storage_class<StorageBuffer>>, %[[IDX:.+]]: index)
-func.func @load_i1(%src: memref<4xi1, #spv.storage_class<StorageBuffer>>, %i : index) -> i1 {
-  // CHECK-DAG: %[[SRC_CAST:.+]] = builtin.unrealized_conversion_cast %[[SRC]] : memref<4xi1, #spv.storage_class<StorageBuffer>> to !spv.ptr<!spv.struct<(!spv.array<4 x i8, stride=1> [0])>, StorageBuffer>
+//  CHECK-SAME: (%[[SRC:.+]]: memref<4xi1, #spirv.storage_class<StorageBuffer>>, %[[IDX:.+]]: index)
+func.func @load_i1(%src: memref<4xi1, #spirv.storage_class<StorageBuffer>>, %i : index) -> i1 {
+  // CHECK-DAG: %[[SRC_CAST:.+]] = builtin.unrealized_conversion_cast %[[SRC]] : memref<4xi1, #spirv.storage_class<StorageBuffer>> to !spirv.ptr<!spirv.struct<(!spirv.array<4 x i8, stride=1> [0])>, StorageBuffer>
   // CHECK-DAG: %[[IDX_CAST:.+]] = builtin.unrealized_conversion_cast %[[IDX]]
-  // CHECK: %[[ZERO_0:.+]] = spv.Constant 0 : i32
-  // CHECK: %[[ZERO_1:.+]] = spv.Constant 0 : i32
-  // CHECK: %[[ONE:.+]] = spv.Constant 1 : i32
-  // CHECK: %[[MUL:.+]] = spv.IMul %[[ONE]], %[[IDX_CAST]] : i32
-  // CHECK: %[[ADD:.+]] = spv.IAdd %[[ZERO_1]], %[[MUL]] : i32
-  // CHECK: %[[ADDR:.+]] = spv.AccessChain %[[SRC_CAST]][%[[ZERO_0]], %[[ADD]]]
-  // CHECK: %[[VAL:.+]] = spv.Load "StorageBuffer" %[[ADDR]] : i8
-  // CHECK: %[[ONE_I8:.+]] = spv.Constant 1 : i8
-  // CHECK: %[[BOOL:.+]] = spv.IEqual %[[VAL]], %[[ONE_I8]] : i8
-  %0 = memref.load %src[%i] : memref<4xi1, #spv.storage_class<StorageBuffer>>
+  // CHECK: %[[ZERO_0:.+]] = spirv.Constant 0 : i32
+  // CHECK: %[[ZERO_1:.+]] = spirv.Constant 0 : i32
+  // CHECK: %[[ONE:.+]] = spirv.Constant 1 : i32
+  // CHECK: %[[MUL:.+]] = spirv.IMul %[[ONE]], %[[IDX_CAST]] : i32
+  // CHECK: %[[ADD:.+]] = spirv.IAdd %[[ZERO_1]], %[[MUL]] : i32
+  // CHECK: %[[ADDR:.+]] = spirv.AccessChain %[[SRC_CAST]][%[[ZERO_0]], %[[ADD]]]
+  // CHECK: %[[VAL:.+]] = spirv.Load "StorageBuffer" %[[ADDR]] : i8
+  // CHECK: %[[ONE_I8:.+]] = spirv.Constant 1 : i8
+  // CHECK: %[[BOOL:.+]] = spirv.IEqual %[[VAL]], %[[ONE_I8]] : i8
+  %0 = memref.load %src[%i] : memref<4xi1, #spirv.storage_class<StorageBuffer>>
   // CHECK: return %[[BOOL]]
   return %0: i1
 }
 
 // CHECK-LABEL: func @store_i1
-//  CHECK-SAME: %[[DST:.+]]: memref<4xi1, #spv.storage_class<StorageBuffer>>,
+//  CHECK-SAME: %[[DST:.+]]: memref<4xi1, #spirv.storage_class<StorageBuffer>>,
 //  CHECK-SAME: %[[IDX:.+]]: index
-func.func @store_i1(%dst: memref<4xi1, #spv.storage_class<StorageBuffer>>, %i: index) {
+func.func @store_i1(%dst: memref<4xi1, #spirv.storage_class<StorageBuffer>>, %i: index) {
   %true = arith.constant true
-  // CHECK-DAG: %[[DST_CAST:.+]] = builtin.unrealized_conversion_cast %[[DST]] : memref<4xi1, #spv.storage_class<StorageBuffer>> to !spv.ptr<!spv.struct<(!spv.array<4 x i8, stride=1> [0])>, StorageBuffer>
+  // CHECK-DAG: %[[DST_CAST:.+]] = builtin.unrealized_conversion_cast %[[DST]] : memref<4xi1, #spirv.storage_class<StorageBuffer>> to !spirv.ptr<!spirv.struct<(!spirv.array<4 x i8, stride=1> [0])>, StorageBuffer>
   // CHECK-DAG: %[[IDX_CAST:.+]] = builtin.unrealized_conversion_cast %[[IDX]]
-  // CHECK: %[[ZERO_0:.+]] = spv.Constant 0 : i32
-  // CHECK: %[[ZERO_1:.+]] = spv.Constant 0 : i32
-  // CHECK: %[[ONE:.+]] = spv.Constant 1 : i32
-  // CHECK: %[[MUL:.+]] = spv.IMul %[[ONE]], %[[IDX_CAST]] : i32
-  // CHECK: %[[ADD:.+]] = spv.IAdd %[[ZERO_1]], %[[MUL]] : i32
-  // CHECK: %[[ADDR:.+]] = spv.AccessChain %[[DST_CAST]][%[[ZERO_0]], %[[ADD]]]
-  // CHECK: %[[ZERO_I8:.+]] = spv.Constant 0 : i8
-  // CHECK: %[[ONE_I8:.+]] = spv.Constant 1 : i8
-  // CHECK: %[[RES:.+]] = spv.Select %{{.+}}, %[[ONE_I8]], %[[ZERO_I8]] : i1, i8
-  // CHECK: spv.Store "StorageBuffer" %[[ADDR]], %[[RES]] : i8
-  memref.store %true, %dst[%i]: memref<4xi1, #spv.storage_class<StorageBuffer>>
+  // CHECK: %[[ZERO_0:.+]] = spirv.Constant 0 : i32
+  // CHECK: %[[ZERO_1:.+]] = spirv.Constant 0 : i32
+  // CHECK: %[[ONE:.+]] = spirv.Constant 1 : i32
+  // CHECK: %[[MUL:.+]] = spirv.IMul %[[ONE]], %[[IDX_CAST]] : i32
+  // CHECK: %[[ADD:.+]] = spirv.IAdd %[[ZERO_1]], %[[MUL]] : i32
+  // CHECK: %[[ADDR:.+]] = spirv.AccessChain %[[DST_CAST]][%[[ZERO_0]], %[[ADD]]]
+  // CHECK: %[[ZERO_I8:.+]] = spirv.Constant 0 : i8
+  // CHECK: %[[ONE_I8:.+]] = spirv.Constant 1 : i8
+  // CHECK: %[[RES:.+]] = spirv.Select %{{.+}}, %[[ONE_I8]], %[[ZERO_I8]] : i1, i8
+  // CHECK: spirv.Store "StorageBuffer" %[[ADDR]], %[[RES]] : i8
+  memref.store %true, %dst[%i]: memref<4xi1, #spirv.storage_class<StorageBuffer>>
   return
 }
 
@@ -113,100 +113,100 @@ func.func @store_i1(%dst: memref<4xi1, #spv.storage_class<StorageBuffer>>, %i: i
 // perform special tricks.
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0,
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0,
       [
-        Kernel, Addresses, Int8, Int16, Int64, Float16, Float64], []>, #spv.resource_limits<>>
+        Kernel, Addresses, Int8, Int16, Int64, Float16, Float64], []>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @load_store_zero_rank_float
-func.func @load_store_zero_rank_float(%arg0: memref<f32, #spv.storage_class<CrossWorkgroup>>, %arg1: memref<f32, #spv.storage_class<CrossWorkgroup>>) {
-  //      CHECK: [[ARG0:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<f32, #spv.storage_class<CrossWorkgroup>> to !spv.ptr<f32, CrossWorkgroup>
-  //      CHECK: [[ARG1:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<f32, #spv.storage_class<CrossWorkgroup>> to !spv.ptr<f32, CrossWorkgroup>
-  //      CHECK: [[ZERO1:%.*]] = spv.Constant 0 : i32
-  //      CHECK: spv.PtrAccessChain [[ARG0]][
+func.func @load_store_zero_rank_float(%arg0: memref<f32, #spirv.storage_class<CrossWorkgroup>>, %arg1: memref<f32, #spirv.storage_class<CrossWorkgroup>>) {
+  //      CHECK: [[ARG0:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<f32, #spirv.storage_class<CrossWorkgroup>> to !spirv.ptr<f32, CrossWorkgroup>
+  //      CHECK: [[ARG1:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<f32, #spirv.storage_class<CrossWorkgroup>> to !spirv.ptr<f32, CrossWorkgroup>
+  //      CHECK: [[ZERO1:%.*]] = spirv.Constant 0 : i32
+  //      CHECK: spirv.PtrAccessChain [[ARG0]][
   // CHECK-SAME: [[ZERO1]]
   // CHECK-SAME: ] :
-  //      CHECK: spv.Load "CrossWorkgroup" %{{.*}} : f32
-  %0 = memref.load %arg0[] : memref<f32, #spv.storage_class<CrossWorkgroup>>
-  //      CHECK: [[ZERO2:%.*]] = spv.Constant 0 : i32
-  //      CHECK: spv.PtrAccessChain [[ARG1]][
+  //      CHECK: spirv.Load "CrossWorkgroup" %{{.*}} : f32
+  %0 = memref.load %arg0[] : memref<f32, #spirv.storage_class<CrossWorkgroup>>
+  //      CHECK: [[ZERO2:%.*]] = spirv.Constant 0 : i32
+  //      CHECK: spirv.PtrAccessChain [[ARG1]][
   // CHECK-SAME: [[ZERO2]]
   // CHECK-SAME: ] :
-  //      CHECK: spv.Store "CrossWorkgroup" %{{.*}} : f32
-  memref.store %0, %arg1[] : memref<f32, #spv.storage_class<CrossWorkgroup>>
+  //      CHECK: spirv.Store "CrossWorkgroup" %{{.*}} : f32
+  memref.store %0, %arg1[] : memref<f32, #spirv.storage_class<CrossWorkgroup>>
   return
 }
 
 // CHECK-LABEL: @load_store_zero_rank_int
-func.func @load_store_zero_rank_int(%arg0: memref<i32, #spv.storage_class<CrossWorkgroup>>, %arg1: memref<i32, #spv.storage_class<CrossWorkgroup>>) {
-  //      CHECK: [[ARG0:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<i32, #spv.storage_class<CrossWorkgroup>> to  !spv.ptr<i32, CrossWorkgroup>
-  //      CHECK: [[ARG1:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<i32, #spv.storage_class<CrossWorkgroup>> to  !spv.ptr<i32, CrossWorkgroup>
-  //      CHECK: [[ZERO1:%.*]] = spv.Constant 0 : i32
-  //      CHECK: spv.PtrAccessChain [[ARG0]][
+func.func @load_store_zero_rank_int(%arg0: memref<i32, #spirv.storage_class<CrossWorkgroup>>, %arg1: memref<i32, #spirv.storage_class<CrossWorkgroup>>) {
+  //      CHECK: [[ARG0:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<i32, #spirv.storage_class<CrossWorkgroup>> to  !spirv.ptr<i32, CrossWorkgroup>
+  //      CHECK: [[ARG1:%.*]] = builtin.unrealized_conversion_cast {{.+}} : memref<i32, #spirv.storage_class<CrossWorkgroup>> to  !spirv.ptr<i32, CrossWorkgroup>
+  //      CHECK: [[ZERO1:%.*]] = spirv.Constant 0 : i32
+  //      CHECK: spirv.PtrAccessChain [[ARG0]][
   // CHECK-SAME: [[ZERO1]]
   // CHECK-SAME: ] :
-  //      CHECK: spv.Load "CrossWorkgroup" %{{.*}} : i32
-  %0 = memref.load %arg0[] : memref<i32, #spv.storage_class<CrossWorkgroup>>
-  //      CHECK: [[ZERO2:%.*]] = spv.Constant 0 : i32
-  //      CHECK: spv.PtrAccessChain [[ARG1]][
+  //      CHECK: spirv.Load "CrossWorkgroup" %{{.*}} : i32
+  %0 = memref.load %arg0[] : memref<i32, #spirv.storage_class<CrossWorkgroup>>
+  //      CHECK: [[ZERO2:%.*]] = spirv.Constant 0 : i32
+  //      CHECK: spirv.PtrAccessChain [[ARG1]][
   // CHECK-SAME: [[ZERO2]]
   // CHECK-SAME: ] :
-  //      CHECK: spv.Store "CrossWorkgroup" %{{.*}} : i32
-  memref.store %0, %arg1[] : memref<i32, #spv.storage_class<CrossWorkgroup>>
+  //      CHECK: spirv.Store "CrossWorkgroup" %{{.*}} : i32
+  memref.store %0, %arg1[] : memref<i32, #spirv.storage_class<CrossWorkgroup>>
   return
 }
 
 // CHECK-LABEL: func @load_store_unknown_dim
-func.func @load_store_unknown_dim(%i: index, %source: memref<?xi32, #spv.storage_class<CrossWorkgroup>>, %dest: memref<?xi32, #spv.storage_class<CrossWorkgroup>>) {
-  // CHECK: %[[SRC:.+]] = builtin.unrealized_conversion_cast {{.+}} : memref<?xi32, #spv.storage_class<CrossWorkgroup>> to !spv.ptr<i32, CrossWorkgroup>
-  // CHECK: %[[DST:.+]] = builtin.unrealized_conversion_cast {{.+}} : memref<?xi32, #spv.storage_class<CrossWorkgroup>> to !spv.ptr<i32, CrossWorkgroup>
-  // CHECK: %[[AC0:.+]] = spv.PtrAccessChain %[[SRC]]
-  // CHECK: spv.Load "CrossWorkgroup" %[[AC0]]
-  %0 = memref.load %source[%i] : memref<?xi32, #spv.storage_class<CrossWorkgroup>>
-  // CHECK: %[[AC1:.+]] = spv.PtrAccessChain %[[DST]]
-  // CHECK: spv.Store "CrossWorkgroup" %[[AC1]]
-  memref.store %0, %dest[%i]: memref<?xi32, #spv.storage_class<CrossWorkgroup>>
+func.func @load_store_unknown_dim(%i: index, %source: memref<?xi32, #spirv.storage_class<CrossWorkgroup>>, %dest: memref<?xi32, #spirv.storage_class<CrossWorkgroup>>) {
+  // CHECK: %[[SRC:.+]] = builtin.unrealized_conversion_cast {{.+}} : memref<?xi32, #spirv.storage_class<CrossWorkgroup>> to !spirv.ptr<i32, CrossWorkgroup>
+  // CHECK: %[[DST:.+]] = builtin.unrealized_conversion_cast {{.+}} : memref<?xi32, #spirv.storage_class<CrossWorkgroup>> to !spirv.ptr<i32, CrossWorkgroup>
+  // CHECK: %[[AC0:.+]] = spirv.PtrAccessChain %[[SRC]]
+  // CHECK: spirv.Load "CrossWorkgroup" %[[AC0]]
+  %0 = memref.load %source[%i] : memref<?xi32, #spirv.storage_class<CrossWorkgroup>>
+  // CHECK: %[[AC1:.+]] = spirv.PtrAccessChain %[[DST]]
+  // CHECK: spirv.Store "CrossWorkgroup" %[[AC1]]
+  memref.store %0, %dest[%i]: memref<?xi32, #spirv.storage_class<CrossWorkgroup>>
   return
 }
 
 // CHECK-LABEL: func @load_i1
-//  CHECK-SAME: (%[[SRC:.+]]: memref<4xi1, #spv.storage_class<CrossWorkgroup>>, %[[IDX:.+]]: index)
-func.func @load_i1(%src: memref<4xi1, #spv.storage_class<CrossWorkgroup>>, %i : index) -> i1 {
-  // CHECK-DAG: %[[SRC_CAST:.+]] = builtin.unrealized_conversion_cast %[[SRC]] : memref<4xi1, #spv.storage_class<CrossWorkgroup>> to !spv.ptr<i8, CrossWorkgroup>
+//  CHECK-SAME: (%[[SRC:.+]]: memref<4xi1, #spirv.storage_class<CrossWorkgroup>>, %[[IDX:.+]]: index)
+func.func @load_i1(%src: memref<4xi1, #spirv.storage_class<CrossWorkgroup>>, %i : index) -> i1 {
+  // CHECK-DAG: %[[SRC_CAST:.+]] = builtin.unrealized_conversion_cast %[[SRC]] : memref<4xi1, #spirv.storage_class<CrossWorkgroup>> to !spirv.ptr<i8, CrossWorkgroup>
   // CHECK-DAG: %[[IDX_CAST:.+]] = builtin.unrealized_conversion_cast %[[IDX]]
-  // CHECK: %[[ZERO_0:.+]] = spv.Constant 0 : i32
-  // CHECK: %[[ZERO_1:.+]] = spv.Constant 0 : i32
-  // CHECK: %[[ONE:.+]] = spv.Constant 1 : i32
-  // CHECK: %[[MUL:.+]] = spv.IMul %[[ONE]], %[[IDX_CAST]] : i32
-  // CHECK: %[[ADD:.+]] = spv.IAdd %[[ZERO_1]], %[[MUL]] : i32
-  // CHECK: %[[ADDR:.+]] = spv.PtrAccessChain %[[SRC_CAST]][%[[ADD]]]
-  // CHECK: %[[VAL:.+]] = spv.Load "CrossWorkgroup" %[[ADDR]] : i8
-  // CHECK: %[[ONE_I8:.+]] = spv.Constant 1 : i8
-  // CHECK: %[[BOOL:.+]] = spv.IEqual %[[VAL]], %[[ONE_I8]] : i8
-  %0 = memref.load %src[%i] : memref<4xi1, #spv.storage_class<CrossWorkgroup>>
+  // CHECK: %[[ZERO_0:.+]] = spirv.Constant 0 : i32
+  // CHECK: %[[ZERO_1:.+]] = spirv.Constant 0 : i32
+  // CHECK: %[[ONE:.+]] = spirv.Constant 1 : i32
+  // CHECK: %[[MUL:.+]] = spirv.IMul %[[ONE]], %[[IDX_CAST]] : i32
+  // CHECK: %[[ADD:.+]] = spirv.IAdd %[[ZERO_1]], %[[MUL]] : i32
+  // CHECK: %[[ADDR:.+]] = spirv.PtrAccessChain %[[SRC_CAST]][%[[ADD]]]
+  // CHECK: %[[VAL:.+]] = spirv.Load "CrossWorkgroup" %[[ADDR]] : i8
+  // CHECK: %[[ONE_I8:.+]] = spirv.Constant 1 : i8
+  // CHECK: %[[BOOL:.+]] = spirv.IEqual %[[VAL]], %[[ONE_I8]] : i8
+  %0 = memref.load %src[%i] : memref<4xi1, #spirv.storage_class<CrossWorkgroup>>
   // CHECK: return %[[BOOL]]
   return %0: i1
 }
 
 // CHECK-LABEL: func @store_i1
-//  CHECK-SAME: %[[DST:.+]]: memref<4xi1, #spv.storage_class<CrossWorkgroup>>,
+//  CHECK-SAME: %[[DST:.+]]: memref<4xi1, #spirv.storage_class<CrossWorkgroup>>,
 //  CHECK-SAME: %[[IDX:.+]]: index
-func.func @store_i1(%dst: memref<4xi1, #spv.storage_class<CrossWorkgroup>>, %i: index) {
+func.func @store_i1(%dst: memref<4xi1, #spirv.storage_class<CrossWorkgroup>>, %i: index) {
   %true = arith.constant true
-  // CHECK-DAG: %[[DST_CAST:.+]] = builtin.unrealized_conversion_cast %[[DST]] : memref<4xi1, #spv.storage_class<CrossWorkgroup>> to !spv.ptr<i8, CrossWorkgroup>
+  // CHECK-DAG: %[[DST_CAST:.+]] = builtin.unrealized_conversion_cast %[[DST]] : memref<4xi1, #spirv.storage_class<CrossWorkgroup>> to !spirv.ptr<i8, CrossWorkgroup>
   // CHECK-DAG: %[[IDX_CAST:.+]] = builtin.unrealized_conversion_cast %[[IDX]]
-  // CHECK: %[[ZERO_0:.+]] = spv.Constant 0 : i32
-  // CHECK: %[[ZERO_1:.+]] = spv.Constant 0 : i32
-  // CHECK: %[[ONE:.+]] = spv.Constant 1 : i32
-  // CHECK: %[[MUL:.+]] = spv.IMul %[[ONE]], %[[IDX_CAST]] : i32
-  // CHECK: %[[ADD:.+]] = spv.IAdd %[[ZERO_1]], %[[MUL]] : i32
-  // CHECK: %[[ADDR:.+]] = spv.PtrAccessChain %[[DST_CAST]][%[[ADD]]]
-  // CHECK: %[[ZERO_I8:.+]] = spv.Constant 0 : i8
-  // CHECK: %[[ONE_I8:.+]] = spv.Constant 1 : i8
-  // CHECK: %[[RES:.+]] = spv.Select %{{.+}}, %[[ONE_I8]], %[[ZERO_I8]] : i1, i8
-  // CHECK: spv.Store "CrossWorkgroup" %[[ADDR]], %[[RES]] : i8
-  memref.store %true, %dst[%i]: memref<4xi1, #spv.storage_class<CrossWorkgroup>>
+  // CHECK: %[[ZERO_0:.+]] = spirv.Constant 0 : i32
+  // CHECK: %[[ZERO_1:.+]] = spirv.Constant 0 : i32
+  // CHECK: %[[ONE:.+]] = spirv.Constant 1 : i32
+  // CHECK: %[[MUL:.+]] = spirv.IMul %[[ONE]], %[[IDX_CAST]] : i32
+  // CHECK: %[[ADD:.+]] = spirv.IAdd %[[ZERO_1]], %[[MUL]] : i32
+  // CHECK: %[[ADDR:.+]] = spirv.PtrAccessChain %[[DST_CAST]][%[[ADD]]]
+  // CHECK: %[[ZERO_I8:.+]] = spirv.Constant 0 : i8
+  // CHECK: %[[ONE_I8:.+]] = spirv.Constant 1 : i8
+  // CHECK: %[[RES:.+]] = spirv.Select %{{.+}}, %[[ONE_I8]], %[[ZERO_I8]] : i1, i8
+  // CHECK: spirv.Store "CrossWorkgroup" %[[ADDR]], %[[RES]] : i8
+  memref.store %true, %dst[%i]: memref<4xi1, #spirv.storage_class<CrossWorkgroup>>
   return
 }
 
@@ -218,195 +218,195 @@ func.func @store_i1(%dst: memref<4xi1, #spv.storage_class<CrossWorkgroup>>, %i:
 // emulated via 32-bit types.
 // TODO: Test i64 types.
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @load_i1
-func.func @load_i1(%arg0: memref<i1, #spv.storage_class<StorageBuffer>>) -> i1 {
-  //     CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
-  //     CHECK: %[[FOUR1:.+]] = spv.Constant 4 : i32
-  //     CHECK: %[[QUOTIENT:.+]] = spv.SDiv %[[ZERO]], %[[FOUR1]] : i32
-  //     CHECK: %[[PTR:.+]] = spv.AccessChain %{{.+}}[%[[ZERO]], %[[QUOTIENT]]]
-  //     CHECK: %[[LOAD:.+]] = spv.Load  "StorageBuffer" %[[PTR]]
-  //     CHECK: %[[FOUR2:.+]] = spv.Constant 4 : i32
-  //     CHECK: %[[EIGHT:.+]] = spv.Constant 8 : i32
-  //     CHECK: %[[IDX:.+]] = spv.UMod %[[ZERO]], %[[FOUR2]] : i32
-  //     CHECK: %[[BITS:.+]] = spv.IMul %[[IDX]], %[[EIGHT]] : i32
-  //     CHECK: %[[VALUE:.+]] = spv.ShiftRightArithmetic %[[LOAD]], %[[BITS]] : i32, i32
-  //     CHECK: %[[MASK:.+]] = spv.Constant 255 : i32
-  //     CHECK: %[[T1:.+]] = spv.BitwiseAnd %[[VALUE]], %[[MASK]] : i32
-  //     CHECK: %[[T2:.+]] = spv.Constant 24 : i32
-  //     CHECK: %[[T3:.+]] = spv.ShiftLeftLogical %[[T1]], %[[T2]] : i32, i32
-  //     CHECK: %[[T4:.+]] = spv.ShiftRightArithmetic %[[T3]], %[[T2]] : i32, i32
+func.func @load_i1(%arg0: memref<i1, #spirv.storage_class<StorageBuffer>>) -> i1 {
+  //     CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
+  //     CHECK: %[[FOUR1:.+]] = spirv.Constant 4 : i32
+  //     CHECK: %[[QUOTIENT:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR1]] : i32
+  //     CHECK: %[[PTR:.+]] = spirv.AccessChain %{{.+}}[%[[ZERO]], %[[QUOTIENT]]]
+  //     CHECK: %[[LOAD:.+]] = spirv.Load  "StorageBuffer" %[[PTR]]
+  //     CHECK: %[[FOUR2:.+]] = spirv.Constant 4 : i32
+  //     CHECK: %[[EIGHT:.+]] = spirv.Constant 8 : i32
+  //     CHECK: %[[IDX:.+]] = spirv.UMod %[[ZERO]], %[[FOUR2]] : i32
+  //     CHECK: %[[BITS:.+]] = spirv.IMul %[[IDX]], %[[EIGHT]] : i32
+  //     CHECK: %[[VALUE:.+]] = spirv.ShiftRightArithmetic %[[LOAD]], %[[BITS]] : i32, i32
+  //     CHECK: %[[MASK:.+]] = spirv.Constant 255 : i32
+  //     CHECK: %[[T1:.+]] = spirv.BitwiseAnd %[[VALUE]], %[[MASK]] : i32
+  //     CHECK: %[[T2:.+]] = spirv.Constant 24 : i32
+  //     CHECK: %[[T3:.+]] = spirv.ShiftLeftLogical %[[T1]], %[[T2]] : i32, i32
+  //     CHECK: %[[T4:.+]] = spirv.ShiftRightArithmetic %[[T3]], %[[T2]] : i32, i32
   // Convert to i1 type.
-  //     CHECK: %[[ONE:.+]] = spv.Constant 1 : i32
-  //     CHECK: %[[RES:.+]]  = spv.IEqual %[[T4]], %[[ONE]] : i32
+  //     CHECK: %[[ONE:.+]] = spirv.Constant 1 : i32
+  //     CHECK: %[[RES:.+]]  = spirv.IEqual %[[T4]], %[[ONE]] : i32
   //     CHECK: return %[[RES]]
-  %0 = memref.load %arg0[] : memref<i1, #spv.storage_class<StorageBuffer>>
+  %0 = memref.load %arg0[] : memref<i1, #spirv.storage_class<StorageBuffer>>
   return %0 : i1
 }
 
 // CHECK-LABEL: @load_i8
-func.func @load_i8(%arg0: memref<i8, #spv.storage_class<StorageBuffer>>) {
-  //     CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
-  //     CHECK: %[[FOUR1:.+]] = spv.Constant 4 : i32
-  //     CHECK: %[[QUOTIENT:.+]] = spv.SDiv %[[ZERO]], %[[FOUR1]] : i32
-  //     CHECK: %[[PTR:.+]] = spv.AccessChain %{{.+}}[%[[ZERO]], %[[QUOTIENT]]]
-  //     CHECK: %[[LOAD:.+]] = spv.Load  "StorageBuffer" %[[PTR]]
-  //     CHECK: %[[FOUR2:.+]] = spv.Constant 4 : i32
-  //     CHECK: %[[EIGHT:.+]] = spv.Constant 8 : i32
-  //     CHECK: %[[IDX:.+]] = spv.UMod %[[ZERO]], %[[FOUR2]] : i32
-  //     CHECK: %[[BITS:.+]] = spv.IMul %[[IDX]], %[[EIGHT]] : i32
-  //     CHECK: %[[VALUE:.+]] = spv.ShiftRightArithmetic %[[LOAD]], %[[BITS]] : i32, i32
-  //     CHECK: %[[MASK:.+]] = spv.Constant 255 : i32
-  //     CHECK: %[[T1:.+]] = spv.BitwiseAnd %[[VALUE]], %[[MASK]] : i32
-  //     CHECK: %[[T2:.+]] = spv.Constant 24 : i32
-  //     CHECK: %[[T3:.+]] = spv.ShiftLeftLogical %[[T1]], %[[T2]] : i32, i32
-  //     CHECK: spv.ShiftRightArithmetic %[[T3]], %[[T2]] : i32, i32
-  %0 = memref.load %arg0[] : memref<i8, #spv.storage_class<StorageBuffer>>
+func.func @load_i8(%arg0: memref<i8, #spirv.storage_class<StorageBuffer>>) {
+  //     CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
+  //     CHECK: %[[FOUR1:.+]] = spirv.Constant 4 : i32
+  //     CHECK: %[[QUOTIENT:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR1]] : i32
+  //     CHECK: %[[PTR:.+]] = spirv.AccessChain %{{.+}}[%[[ZERO]], %[[QUOTIENT]]]
+  //     CHECK: %[[LOAD:.+]] = spirv.Load  "StorageBuffer" %[[PTR]]
+  //     CHECK: %[[FOUR2:.+]] = spirv.Constant 4 : i32
+  //     CHECK: %[[EIGHT:.+]] = spirv.Constant 8 : i32
+  //     CHECK: %[[IDX:.+]] = spirv.UMod %[[ZERO]], %[[FOUR2]] : i32
+  //     CHECK: %[[BITS:.+]] = spirv.IMul %[[IDX]], %[[EIGHT]] : i32
+  //     CHECK: %[[VALUE:.+]] = spirv.ShiftRightArithmetic %[[LOAD]], %[[BITS]] : i32, i32
+  //     CHECK: %[[MASK:.+]] = spirv.Constant 255 : i32
+  //     CHECK: %[[T1:.+]] = spirv.BitwiseAnd %[[VALUE]], %[[MASK]] : i32
+  //     CHECK: %[[T2:.+]] = spirv.Constant 24 : i32
+  //     CHECK: %[[T3:.+]] = spirv.ShiftLeftLogical %[[T1]], %[[T2]] : i32, i32
+  //     CHECK: spirv.ShiftRightArithmetic %[[T3]], %[[T2]] : i32, i32
+  %0 = memref.load %arg0[] : memref<i8, #spirv.storage_class<StorageBuffer>>
   return
 }
 
 // CHECK-LABEL: @load_i16
 //       CHECK: (%[[ARG0:.+]]: {{.*}}, %[[ARG1:.+]]: index)
-func.func @load_i16(%arg0: memref<10xi16, #spv.storage_class<StorageBuffer>>, %index : index) {
+func.func @load_i16(%arg0: memref<10xi16, #spirv.storage_class<StorageBuffer>>, %index : index) {
   //     CHECK: %[[ARG1_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG1]] : index to i32
-  //     CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
-  //     CHECK: %[[OFFSET:.+]] = spv.Constant 0 : i32
-  //     CHECK: %[[ONE:.+]] = spv.Constant 1 : i32
-  //     CHECK: %[[UPDATE:.+]] = spv.IMul %[[ONE]], %[[ARG1_CAST]] : i32
-  //     CHECK: %[[FLAT_IDX:.+]] = spv.IAdd %[[OFFSET]], %[[UPDATE]] : i32
-  //     CHECK: %[[TWO1:.+]] = spv.Constant 2 : i32
-  //     CHECK: %[[QUOTIENT:.+]] = spv.SDiv %[[FLAT_IDX]], %[[TWO1]] : i32
-  //     CHECK: %[[PTR:.+]] = spv.AccessChain %{{.+}}[%[[ZERO]], %[[QUOTIENT]]]
-  //     CHECK: %[[LOAD:.+]] = spv.Load  "StorageBuffer" %[[PTR]]
-  //     CHECK: %[[TWO2:.+]] = spv.Constant 2 : i32
-  //     CHECK: %[[SIXTEEN:.+]] = spv.Constant 16 : i32
-  //     CHECK: %[[IDX:.+]] = spv.UMod %[[FLAT_IDX]], %[[TWO2]] : i32
-  //     CHECK: %[[BITS:.+]] = spv.IMul %[[IDX]], %[[SIXTEEN]] : i32
-  //     CHECK: %[[VALUE:.+]] = spv.ShiftRightArithmetic %[[LOAD]], %[[BITS]] : i32, i32
-  //     CHECK: %[[MASK:.+]] = spv.Constant 65535 : i32
-  //     CHECK: %[[T1:.+]] = spv.BitwiseAnd %[[VALUE]], %[[MASK]] : i32
-  //     CHECK: %[[T2:.+]] = spv.Constant 16 : i32
-  //     CHECK: %[[T3:.+]] = spv.ShiftLeftLogical %[[T1]], %[[T2]] : i32, i32
-  //     CHECK: spv.ShiftRightArithmetic %[[T3]], %[[T2]] : i32, i32
-  %0 = memref.load %arg0[%index] : memref<10xi16, #spv.storage_class<StorageBuffer>>
+  //     CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
+  //     CHECK: %[[OFFSET:.+]] = spirv.Constant 0 : i32
+  //     CHECK: %[[ONE:.+]] = spirv.Constant 1 : i32
+  //     CHECK: %[[UPDATE:.+]] = spirv.IMul %[[ONE]], %[[ARG1_CAST]] : i32
+  //     CHECK: %[[FLAT_IDX:.+]] = spirv.IAdd %[[OFFSET]], %[[UPDATE]] : i32
+  //     CHECK: %[[TWO1:.+]] = spirv.Constant 2 : i32
+  //     CHECK: %[[QUOTIENT:.+]] = spirv.SDiv %[[FLAT_IDX]], %[[TWO1]] : i32
+  //     CHECK: %[[PTR:.+]] = spirv.AccessChain %{{.+}}[%[[ZERO]], %[[QUOTIENT]]]
+  //     CHECK: %[[LOAD:.+]] = spirv.Load  "StorageBuffer" %[[PTR]]
+  //     CHECK: %[[TWO2:.+]] = spirv.Constant 2 : i32
+  //     CHECK: %[[SIXTEEN:.+]] = spirv.Constant 16 : i32
+  //     CHECK: %[[IDX:.+]] = spirv.UMod %[[FLAT_IDX]], %[[TWO2]] : i32
+  //     CHECK: %[[BITS:.+]] = spirv.IMul %[[IDX]], %[[SIXTEEN]] : i32
+  //     CHECK: %[[VALUE:.+]] = spirv.ShiftRightArithmetic %[[LOAD]], %[[BITS]] : i32, i32
+  //     CHECK: %[[MASK:.+]] = spirv.Constant 65535 : i32
+  //     CHECK: %[[T1:.+]] = spirv.BitwiseAnd %[[VALUE]], %[[MASK]] : i32
+  //     CHECK: %[[T2:.+]] = spirv.Constant 16 : i32
+  //     CHECK: %[[T3:.+]] = spirv.ShiftLeftLogical %[[T1]], %[[T2]] : i32, i32
+  //     CHECK: spirv.ShiftRightArithmetic %[[T3]], %[[T2]] : i32, i32
+  %0 = memref.load %arg0[%index] : memref<10xi16, #spirv.storage_class<StorageBuffer>>
   return
 }
 
 // CHECK-LABEL: @load_i32
-func.func @load_i32(%arg0: memref<i32, #spv.storage_class<StorageBuffer>>) {
-  // CHECK-NOT: spv.SDiv
-  //     CHECK: spv.Load
-  // CHECK-NOT: spv.ShiftRightArithmetic
-  %0 = memref.load %arg0[] : memref<i32, #spv.storage_class<StorageBuffer>>
+func.func @load_i32(%arg0: memref<i32, #spirv.storage_class<StorageBuffer>>) {
+  // CHECK-NOT: spirv.SDiv
+  //     CHECK: spirv.Load
+  // CHECK-NOT: spirv.ShiftRightArithmetic
+  %0 = memref.load %arg0[] : memref<i32, #spirv.storage_class<StorageBuffer>>
   return
 }
 
 // CHECK-LABEL: @load_f32
-func.func @load_f32(%arg0: memref<f32, #spv.storage_class<StorageBuffer>>) {
-  // CHECK-NOT: spv.SDiv
-  //     CHECK: spv.Load
-  // CHECK-NOT: spv.ShiftRightArithmetic
-  %0 = memref.load %arg0[] : memref<f32, #spv.storage_class<StorageBuffer>>
+func.func @load_f32(%arg0: memref<f32, #spirv.storage_class<StorageBuffer>>) {
+  // CHECK-NOT: spirv.SDiv
+  //     CHECK: spirv.Load
+  // CHECK-NOT: spirv.ShiftRightArithmetic
+  %0 = memref.load %arg0[] : memref<f32, #spirv.storage_class<StorageBuffer>>
   return
 }
 
 // CHECK-LABEL: @store_i1
 //       CHECK: (%[[ARG0:.+]]: {{.*}}, %[[ARG1:.+]]: i1)
-func.func @store_i1(%arg0: memref<i1, #spv.storage_class<StorageBuffer>>, %value: i1) {
+func.func @store_i1(%arg0: memref<i1, #spirv.storage_class<StorageBuffer>>, %value: i1) {
   //     CHECK: %[[ARG0_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
-  //     CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
-  //     CHECK: %[[FOUR:.+]] = spv.Constant 4 : i32
-  //     CHECK: %[[EIGHT:.+]] = spv.Constant 8 : i32
-  //     CHECK: %[[IDX:.+]] = spv.UMod %[[ZERO]], %[[FOUR]] : i32
-  //     CHECK: %[[OFFSET:.+]] = spv.IMul %[[IDX]], %[[EIGHT]] : i32
-  //     CHECK: %[[MASK1:.+]] = spv.Constant 255 : i32
-  //     CHECK: %[[TMP1:.+]] = spv.ShiftLeftLogical %[[MASK1]], %[[OFFSET]] : i32, i32
-  //     CHECK: %[[MASK:.+]] = spv.Not %[[TMP1]] : i32
-  //     CHECK: %[[ZERO1:.+]] = spv.Constant 0 : i32
-  //     CHECK: %[[ONE1:.+]] = spv.Constant 1 : i32
-  //     CHECK: %[[CASTED_ARG1:.+]] = spv.Select %[[ARG1]], %[[ONE1]], %[[ZERO1]] : i1, i32
-  //     CHECK: %[[CLAMPED_VAL:.+]] = spv.BitwiseAnd %[[CASTED_ARG1]], %[[MASK1]] : i32
-  //     CHECK: %[[STORE_VAL:.+]] = spv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[OFFSET]] : i32, i32
-  //     CHECK: %[[FOUR2:.+]] = spv.Constant 4 : i32
-  //     CHECK: %[[ACCESS_IDX:.+]] = spv.SDiv %[[ZERO]], %[[FOUR2]] : i32
-  //     CHECK: %[[PTR:.+]] = spv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]]
-  //     CHECK: spv.AtomicAnd "Device" "AcquireRelease" %[[PTR]], %[[MASK]]
-  //     CHECK: spv.AtomicOr "Device" "AcquireRelease" %[[PTR]], %[[STORE_VAL]]
-  memref.store %value, %arg0[] : memref<i1, #spv.storage_class<StorageBuffer>>
+  //     CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
+  //     CHECK: %[[FOUR:.+]] = spirv.Constant 4 : i32
+  //     CHECK: %[[EIGHT:.+]] = spirv.Constant 8 : i32
+  //     CHECK: %[[IDX:.+]] = spirv.UMod %[[ZERO]], %[[FOUR]] : i32
+  //     CHECK: %[[OFFSET:.+]] = spirv.IMul %[[IDX]], %[[EIGHT]] : i32
+  //     CHECK: %[[MASK1:.+]] = spirv.Constant 255 : i32
+  //     CHECK: %[[TMP1:.+]] = spirv.ShiftLeftLogical %[[MASK1]], %[[OFFSET]] : i32, i32
+  //     CHECK: %[[MASK:.+]] = spirv.Not %[[TMP1]] : i32
+  //     CHECK: %[[ZERO1:.+]] = spirv.Constant 0 : i32
+  //     CHECK: %[[ONE1:.+]] = spirv.Constant 1 : i32
+  //     CHECK: %[[CASTED_ARG1:.+]] = spirv.Select %[[ARG1]], %[[ONE1]], %[[ZERO1]] : i1, i32
+  //     CHECK: %[[CLAMPED_VAL:.+]] = spirv.BitwiseAnd %[[CASTED_ARG1]], %[[MASK1]] : i32
+  //     CHECK: %[[STORE_VAL:.+]] = spirv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[OFFSET]] : i32, i32
+  //     CHECK: %[[FOUR2:.+]] = spirv.Constant 4 : i32
+  //     CHECK: %[[ACCESS_IDX:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR2]] : i32
+  //     CHECK: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]]
+  //     CHECK: spirv.AtomicAnd "Device" "AcquireRelease" %[[PTR]], %[[MASK]]
+  //     CHECK: spirv.AtomicOr "Device" "AcquireRelease" %[[PTR]], %[[STORE_VAL]]
+  memref.store %value, %arg0[] : memref<i1, #spirv.storage_class<StorageBuffer>>
   return
 }
 
 // CHECK-LABEL: @store_i8
 //       CHECK: (%[[ARG0:.+]]: {{.*}}, %[[ARG1:.+]]: i8)
-func.func @store_i8(%arg0: memref<i8, #spv.storage_class<StorageBuffer>>, %value: i8) {
+func.func @store_i8(%arg0: memref<i8, #spirv.storage_class<StorageBuffer>>, %value: i8) {
   //     CHECK-DAG: %[[ARG1_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG1]] : i8 to i32
   //     CHECK-DAG: %[[ARG0_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
-  //     CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
-  //     CHECK: %[[FOUR:.+]] = spv.Constant 4 : i32
-  //     CHECK: %[[EIGHT:.+]] = spv.Constant 8 : i32
-  //     CHECK: %[[IDX:.+]] = spv.UMod %[[ZERO]], %[[FOUR]] : i32
-  //     CHECK: %[[OFFSET:.+]] = spv.IMul %[[IDX]], %[[EIGHT]] : i32
-  //     CHECK: %[[MASK1:.+]] = spv.Constant 255 : i32
-  //     CHECK: %[[TMP1:.+]] = spv.ShiftLeftLogical %[[MASK1]], %[[OFFSET]] : i32, i32
-  //     CHECK: %[[MASK:.+]] = spv.Not %[[TMP1]] : i32
-  //     CHECK: %[[CLAMPED_VAL:.+]] = spv.BitwiseAnd %[[ARG1_CAST]], %[[MASK1]] : i32
-  //     CHECK: %[[STORE_VAL:.+]] = spv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[OFFSET]] : i32, i32
-  //     CHECK: %[[FOUR2:.+]] = spv.Constant 4 : i32
-  //     CHECK: %[[ACCESS_IDX:.+]] = spv.SDiv %[[ZERO]], %[[FOUR2]] : i32
-  //     CHECK: %[[PTR:.+]] = spv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]]
-  //     CHECK: spv.AtomicAnd "Device" "AcquireRelease" %[[PTR]], %[[MASK]]
-  //     CHECK: spv.AtomicOr "Device" "AcquireRelease" %[[PTR]], %[[STORE_VAL]]
-  memref.store %value, %arg0[] : memref<i8, #spv.storage_class<StorageBuffer>>
+  //     CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
+  //     CHECK: %[[FOUR:.+]] = spirv.Constant 4 : i32
+  //     CHECK: %[[EIGHT:.+]] = spirv.Constant 8 : i32
+  //     CHECK: %[[IDX:.+]] = spirv.UMod %[[ZERO]], %[[FOUR]] : i32
+  //     CHECK: %[[OFFSET:.+]] = spirv.IMul %[[IDX]], %[[EIGHT]] : i32
+  //     CHECK: %[[MASK1:.+]] = spirv.Constant 255 : i32
+  //     CHECK: %[[TMP1:.+]] = spirv.ShiftLeftLogical %[[MASK1]], %[[OFFSET]] : i32, i32
+  //     CHECK: %[[MASK:.+]] = spirv.Not %[[TMP1]] : i32
+  //     CHECK: %[[CLAMPED_VAL:.+]] = spirv.BitwiseAnd %[[ARG1_CAST]], %[[MASK1]] : i32
+  //     CHECK: %[[STORE_VAL:.+]] = spirv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[OFFSET]] : i32, i32
+  //     CHECK: %[[FOUR2:.+]] = spirv.Constant 4 : i32
+  //     CHECK: %[[ACCESS_IDX:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR2]] : i32
+  //     CHECK: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]]
+  //     CHECK: spirv.AtomicAnd "Device" "AcquireRelease" %[[PTR]], %[[MASK]]
+  //     CHECK: spirv.AtomicOr "Device" "AcquireRelease" %[[PTR]], %[[STORE_VAL]]
+  memref.store %value, %arg0[] : memref<i8, #spirv.storage_class<StorageBuffer>>
   return
 }
 
 // CHECK-LABEL: @store_i16
-//       CHECK: (%[[ARG0:.+]]: memref<10xi16, #spv.storage_class<StorageBuffer>>, %[[ARG1:.+]]: index, %[[ARG2:.+]]: i16)
-func.func @store_i16(%arg0: memref<10xi16, #spv.storage_class<StorageBuffer>>, %index: index, %value: i16) {
+//       CHECK: (%[[ARG0:.+]]: memref<10xi16, #spirv.storage_class<StorageBuffer>>, %[[ARG1:.+]]: index, %[[ARG2:.+]]: i16)
+func.func @store_i16(%arg0: memref<10xi16, #spirv.storage_class<StorageBuffer>>, %index: index, %value: i16) {
   //     CHECK-DAG: %[[ARG2_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG2]] : i16 to i32
   //     CHECK-DAG: %[[ARG0_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
   //     CHECK-DAG: %[[ARG1_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG1]] : index to i32
-  //     CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
-  //     CHECK: %[[OFFSET:.+]] = spv.Constant 0 : i32
-  //     CHECK: %[[ONE:.+]] = spv.Constant 1 : i32
-  //     CHECK: %[[UPDATE:.+]] = spv.IMul %[[ONE]], %[[ARG1_CAST]] : i32
-  //     CHECK: %[[FLAT_IDX:.+]] = spv.IAdd %[[OFFSET]], %[[UPDATE]] : i32
-  //     CHECK: %[[TWO:.+]] = spv.Constant 2 : i32
-  //     CHECK: %[[SIXTEEN:.+]] = spv.Constant 16 : i32
-  //     CHECK: %[[IDX:.+]] = spv.UMod %[[FLAT_IDX]], %[[TWO]] : i32
-  //     CHECK: %[[OFFSET:.+]] = spv.IMul %[[IDX]], %[[SIXTEEN]] : i32
-  //     CHECK: %[[MASK1:.+]] = spv.Constant 65535 : i32
-  //     CHECK: %[[TMP1:.+]] = spv.ShiftLeftLogical %[[MASK1]], %[[OFFSET]] : i32, i32
-  //     CHECK: %[[MASK:.+]] = spv.Not %[[TMP1]] : i32
-  //     CHECK: %[[CLAMPED_VAL:.+]] = spv.BitwiseAnd %[[ARG2_CAST]], %[[MASK1]] : i32
-  //     CHECK: %[[STORE_VAL:.+]] = spv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[OFFSET]] : i32, i32
-  //     CHECK: %[[TWO2:.+]] = spv.Constant 2 : i32
-  //     CHECK: %[[ACCESS_IDX:.+]] = spv.SDiv %[[FLAT_IDX]], %[[TWO2]] : i32
-  //     CHECK: %[[PTR:.+]] = spv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]]
-  //     CHECK: spv.AtomicAnd "Device" "AcquireRelease" %[[PTR]], %[[MASK]]
-  //     CHECK: spv.AtomicOr "Device" "AcquireRelease" %[[PTR]], %[[STORE_VAL]]
-  memref.store %value, %arg0[%index] : memref<10xi16, #spv.storage_class<StorageBuffer>>
+  //     CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
+  //     CHECK: %[[OFFSET:.+]] = spirv.Constant 0 : i32
+  //     CHECK: %[[ONE:.+]] = spirv.Constant 1 : i32
+  //     CHECK: %[[UPDATE:.+]] = spirv.IMul %[[ONE]], %[[ARG1_CAST]] : i32
+  //     CHECK: %[[FLAT_IDX:.+]] = spirv.IAdd %[[OFFSET]], %[[UPDATE]] : i32
+  //     CHECK: %[[TWO:.+]] = spirv.Constant 2 : i32
+  //     CHECK: %[[SIXTEEN:.+]] = spirv.Constant 16 : i32
+  //     CHECK: %[[IDX:.+]] = spirv.UMod %[[FLAT_IDX]], %[[TWO]] : i32
+  //     CHECK: %[[OFFSET:.+]] = spirv.IMul %[[IDX]], %[[SIXTEEN]] : i32
+  //     CHECK: %[[MASK1:.+]] = spirv.Constant 65535 : i32
+  //     CHECK: %[[TMP1:.+]] = spirv.ShiftLeftLogical %[[MASK1]], %[[OFFSET]] : i32, i32
+  //     CHECK: %[[MASK:.+]] = spirv.Not %[[TMP1]] : i32
+  //     CHECK: %[[CLAMPED_VAL:.+]] = spirv.BitwiseAnd %[[ARG2_CAST]], %[[MASK1]] : i32
+  //     CHECK: %[[STORE_VAL:.+]] = spirv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[OFFSET]] : i32, i32
+  //     CHECK: %[[TWO2:.+]] = spirv.Constant 2 : i32
+  //     CHECK: %[[ACCESS_IDX:.+]] = spirv.SDiv %[[FLAT_IDX]], %[[TWO2]] : i32
+  //     CHECK: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]]
+  //     CHECK: spirv.AtomicAnd "Device" "AcquireRelease" %[[PTR]], %[[MASK]]
+  //     CHECK: spirv.AtomicOr "Device" "AcquireRelease" %[[PTR]], %[[STORE_VAL]]
+  memref.store %value, %arg0[%index] : memref<10xi16, #spirv.storage_class<StorageBuffer>>
   return
 }
 
 // CHECK-LABEL: @store_i32
-func.func @store_i32(%arg0: memref<i32, #spv.storage_class<StorageBuffer>>, %value: i32) {
-  //     CHECK: spv.Store
-  // CHECK-NOT: spv.AtomicAnd
-  // CHECK-NOT: spv.AtomicOr
-  memref.store %value, %arg0[] : memref<i32, #spv.storage_class<StorageBuffer>>
+func.func @store_i32(%arg0: memref<i32, #spirv.storage_class<StorageBuffer>>, %value: i32) {
+  //     CHECK: spirv.Store
+  // CHECK-NOT: spirv.AtomicAnd
+  // CHECK-NOT: spirv.AtomicOr
+  memref.store %value, %arg0[] : memref<i32, #spirv.storage_class<StorageBuffer>>
   return
 }
 
 // CHECK-LABEL: @store_f32
-func.func @store_f32(%arg0: memref<f32, #spv.storage_class<StorageBuffer>>, %value: f32) {
-  //     CHECK: spv.Store
-  // CHECK-NOT: spv.AtomicAnd
-  // CHECK-NOT: spv.AtomicOr
-  memref.store %value, %arg0[] : memref<f32, #spv.storage_class<StorageBuffer>>
+func.func @store_f32(%arg0: memref<f32, #spirv.storage_class<StorageBuffer>>, %value: f32) {
+  //     CHECK: spirv.Store
+  // CHECK-NOT: spirv.AtomicAnd
+  // CHECK-NOT: spirv.AtomicOr
+  memref.store %value, %arg0[] : memref<f32, #spirv.storage_class<StorageBuffer>>
   return
 }
 
@@ -417,71 +417,71 @@ func.func @store_f32(%arg0: memref<f32, #spv.storage_class<StorageBuffer>>, %val
 // Check that access chain indices are properly adjusted if non-16/32-bit types
 // are emulated via 32-bit types.
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Int16, StorageBuffer16BitAccess, Shader],
-    [SPV_KHR_storage_buffer_storage_class, SPV_KHR_16bit_storage]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Int16, StorageBuffer16BitAccess, Shader],
+    [SPV_KHR_storage_buffer_storage_class, SPV_KHR_16bit_storage]>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @load_i8
-func.func @load_i8(%arg0: memref<i8, #spv.storage_class<StorageBuffer>>) {
-  //     CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
-  //     CHECK: %[[FOUR1:.+]] = spv.Constant 4 : i32
-  //     CHECK: %[[QUOTIENT:.+]] = spv.SDiv %[[ZERO]], %[[FOUR1]] : i32
-  //     CHECK: %[[PTR:.+]] = spv.AccessChain %{{.+}}[%[[ZERO]], %[[QUOTIENT]]]
-  //     CHECK: %[[LOAD:.+]] = spv.Load  "StorageBuffer" %[[PTR]]
-  //     CHECK: %[[FOUR2:.+]] = spv.Constant 4 : i32
-  //     CHECK: %[[EIGHT:.+]] = spv.Constant 8 : i32
-  //     CHECK: %[[IDX:.+]] = spv.UMod %[[ZERO]], %[[FOUR2]] : i32
-  //     CHECK: %[[BITS:.+]] = spv.IMul %[[IDX]], %[[EIGHT]] : i32
-  //     CHECK: %[[VALUE:.+]] = spv.ShiftRightArithmetic %[[LOAD]], %[[BITS]] : i32, i32
-  //     CHECK: %[[MASK:.+]] = spv.Constant 255 : i32
-  //     CHECK: %[[T1:.+]] = spv.BitwiseAnd %[[VALUE]], %[[MASK]] : i32
-  //     CHECK: %[[T2:.+]] = spv.Constant 24 : i32
-  //     CHECK: %[[T3:.+]] = spv.ShiftLeftLogical %[[T1]], %[[T2]] : i32, i32
-  //     CHECK: spv.ShiftRightArithmetic %[[T3]], %[[T2]] : i32, i32
-  %0 = memref.load %arg0[] : memref<i8, #spv.storage_class<StorageBuffer>>
+func.func @load_i8(%arg0: memref<i8, #spirv.storage_class<StorageBuffer>>) {
+  //     CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
+  //     CHECK: %[[FOUR1:.+]] = spirv.Constant 4 : i32
+  //     CHECK: %[[QUOTIENT:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR1]] : i32
+  //     CHECK: %[[PTR:.+]] = spirv.AccessChain %{{.+}}[%[[ZERO]], %[[QUOTIENT]]]
+  //     CHECK: %[[LOAD:.+]] = spirv.Load  "StorageBuffer" %[[PTR]]
+  //     CHECK: %[[FOUR2:.+]] = spirv.Constant 4 : i32
+  //     CHECK: %[[EIGHT:.+]] = spirv.Constant 8 : i32
+  //     CHECK: %[[IDX:.+]] = spirv.UMod %[[ZERO]], %[[FOUR2]] : i32
+  //     CHECK: %[[BITS:.+]] = spirv.IMul %[[IDX]], %[[EIGHT]] : i32
+  //     CHECK: %[[VALUE:.+]] = spirv.ShiftRightArithmetic %[[LOAD]], %[[BITS]] : i32, i32
+  //     CHECK: %[[MASK:.+]] = spirv.Constant 255 : i32
+  //     CHECK: %[[T1:.+]] = spirv.BitwiseAnd %[[VALUE]], %[[MASK]] : i32
+  //     CHECK: %[[T2:.+]] = spirv.Constant 24 : i32
+  //     CHECK: %[[T3:.+]] = spirv.ShiftLeftLogical %[[T1]], %[[T2]] : i32, i32
+  //     CHECK: spirv.ShiftRightArithmetic %[[T3]], %[[T2]] : i32, i32
+  %0 = memref.load %arg0[] : memref<i8, #spirv.storage_class<StorageBuffer>>
   return
 }
 
 // CHECK-LABEL: @load_i16
-func.func @load_i16(%arg0: memref<i16, #spv.storage_class<StorageBuffer>>) {
-  // CHECK-NOT: spv.SDiv
-  //     CHECK: spv.Load
-  // CHECK-NOT: spv.ShiftRightArithmetic
-  %0 = memref.load %arg0[] : memref<i16, #spv.storage_class<StorageBuffer>>
+func.func @load_i16(%arg0: memref<i16, #spirv.storage_class<StorageBuffer>>) {
+  // CHECK-NOT: spirv.SDiv
+  //     CHECK: spirv.Load
+  // CHECK-NOT: spirv.ShiftRightArithmetic
+  %0 = memref.load %arg0[] : memref<i16, #spirv.storage_class<StorageBuffer>>
   return
 }
 
 // CHECK-LABEL: @store_i8
 //       CHECK: (%[[ARG0:.+]]: {{.*}}, %[[ARG1:.+]]: i8)
-func.func @store_i8(%arg0: memref<i8, #spv.storage_class<StorageBuffer>>, %value: i8) {
+func.func @store_i8(%arg0: memref<i8, #spirv.storage_class<StorageBuffer>>, %value: i8) {
   //     CHECK-DAG: %[[ARG1_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG1]] : i8 to i32
   //     CHECK-DAG: %[[ARG0_CAST:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
-  //     CHECK: %[[ZERO:.+]] = spv.Constant 0 : i32
-  //     CHECK: %[[FOUR:.+]] = spv.Constant 4 : i32
-  //     CHECK: %[[EIGHT:.+]] = spv.Constant 8 : i32
-  //     CHECK: %[[IDX:.+]] = spv.UMod %[[ZERO]], %[[FOUR]] : i32
-  //     CHECK: %[[OFFSET:.+]] = spv.IMul %[[IDX]], %[[EIGHT]] : i32
-  //     CHECK: %[[MASK1:.+]] = spv.Constant 255 : i32
-  //     CHECK: %[[TMP1:.+]] = spv.ShiftLeftLogical %[[MASK1]], %[[OFFSET]] : i32, i32
-  //     CHECK: %[[MASK:.+]] = spv.Not %[[TMP1]] : i32
-  //     CHECK: %[[CLAMPED_VAL:.+]] = spv.BitwiseAnd %[[ARG1_CAST]], %[[MASK1]] : i32
-  //     CHECK: %[[STORE_VAL:.+]] = spv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[OFFSET]] : i32, i32
-  //     CHECK: %[[FOUR2:.+]] = spv.Constant 4 : i32
-  //     CHECK: %[[ACCESS_IDX:.+]] = spv.SDiv %[[ZERO]], %[[FOUR2]] : i32
-  //     CHECK: %[[PTR:.+]] = spv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]]
-  //     CHECK: spv.AtomicAnd "Device" "AcquireRelease" %[[PTR]], %[[MASK]]
-  //     CHECK: spv.AtomicOr "Device" "AcquireRelease" %[[PTR]], %[[STORE_VAL]]
-  memref.store %value, %arg0[] : memref<i8, #spv.storage_class<StorageBuffer>>
+  //     CHECK: %[[ZERO:.+]] = spirv.Constant 0 : i32
+  //     CHECK: %[[FOUR:.+]] = spirv.Constant 4 : i32
+  //     CHECK: %[[EIGHT:.+]] = spirv.Constant 8 : i32
+  //     CHECK: %[[IDX:.+]] = spirv.UMod %[[ZERO]], %[[FOUR]] : i32
+  //     CHECK: %[[OFFSET:.+]] = spirv.IMul %[[IDX]], %[[EIGHT]] : i32
+  //     CHECK: %[[MASK1:.+]] = spirv.Constant 255 : i32
+  //     CHECK: %[[TMP1:.+]] = spirv.ShiftLeftLogical %[[MASK1]], %[[OFFSET]] : i32, i32
+  //     CHECK: %[[MASK:.+]] = spirv.Not %[[TMP1]] : i32
+  //     CHECK: %[[CLAMPED_VAL:.+]] = spirv.BitwiseAnd %[[ARG1_CAST]], %[[MASK1]] : i32
+  //     CHECK: %[[STORE_VAL:.+]] = spirv.ShiftLeftLogical %[[CLAMPED_VAL]], %[[OFFSET]] : i32, i32
+  //     CHECK: %[[FOUR2:.+]] = spirv.Constant 4 : i32
+  //     CHECK: %[[ACCESS_IDX:.+]] = spirv.SDiv %[[ZERO]], %[[FOUR2]] : i32
+  //     CHECK: %[[PTR:.+]] = spirv.AccessChain %[[ARG0_CAST]][%[[ZERO]], %[[ACCESS_IDX]]]
+  //     CHECK: spirv.AtomicAnd "Device" "AcquireRelease" %[[PTR]], %[[MASK]]
+  //     CHECK: spirv.AtomicOr "Device" "AcquireRelease" %[[PTR]], %[[STORE_VAL]]
+  memref.store %value, %arg0[] : memref<i8, #spirv.storage_class<StorageBuffer>>
   return
 }
 
 // CHECK-LABEL: @store_i16
-func.func @store_i16(%arg0: memref<10xi16, #spv.storage_class<StorageBuffer>>, %index: index, %value: i16) {
-  //     CHECK: spv.Store
-  // CHECK-NOT: spv.AtomicAnd
-  // CHECK-NOT: spv.AtomicOr
-  memref.store %value, %arg0[%index] : memref<10xi16, #spv.storage_class<StorageBuffer>>
+func.func @store_i16(%arg0: memref<10xi16, #spirv.storage_class<StorageBuffer>>, %index: index, %value: i16) {
+  //     CHECK: spirv.Store
+  // CHECK-NOT: spirv.AtomicAnd
+  // CHECK-NOT: spirv.AtomicOr
+  memref.store %value, %arg0[%index] : memref<10xi16, #spirv.storage_class<StorageBuffer>>
   return
 }
 

diff  --git a/mlir/test/Conversion/SCFToSPIRV/for.mlir b/mlir/test/Conversion/SCFToSPIRV/for.mlir
index 54a8e938187d..02558463b866 100644
--- a/mlir/test/Conversion/SCFToSPIRV/for.mlir
+++ b/mlir/test/Conversion/SCFToSPIRV/for.mlir
@@ -1,85 +1,85 @@
 // RUN: mlir-opt -convert-scf-to-spirv %s -o - | FileCheck %s
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
 } {
 
-func.func @loop_kernel(%arg2 : memref<10xf32, #spv.storage_class<StorageBuffer>>, %arg3 : memref<10xf32, #spv.storage_class<StorageBuffer>>) {
-  // CHECK: %[[LB:.*]] = spv.Constant 4 : i32
+func.func @loop_kernel(%arg2 : memref<10xf32, #spirv.storage_class<StorageBuffer>>, %arg3 : memref<10xf32, #spirv.storage_class<StorageBuffer>>) {
+  // CHECK: %[[LB:.*]] = spirv.Constant 4 : i32
   %lb = arith.constant 4 : index
-  // CHECK: %[[UB:.*]] = spv.Constant 42 : i32
+  // CHECK: %[[UB:.*]] = spirv.Constant 42 : i32
   %ub = arith.constant 42 : index
-  // CHECK: %[[STEP:.*]] = spv.Constant 2 : i32
+  // CHECK: %[[STEP:.*]] = spirv.Constant 2 : i32
   %step = arith.constant 2 : index
-  // CHECK:      spv.mlir.loop {
-  // CHECK-NEXT:   spv.Branch ^[[HEADER:.*]](%[[LB]] : i32)
+  // CHECK:      spirv.mlir.loop {
+  // CHECK-NEXT:   spirv.Branch ^[[HEADER:.*]](%[[LB]] : i32)
   // CHECK:      ^[[HEADER]](%[[INDVAR:.*]]: i32):
-  // CHECK:        %[[CMP:.*]] = spv.SLessThan %[[INDVAR]], %[[UB]] : i32
-  // CHECK:        spv.BranchConditional %[[CMP]], ^[[BODY:.*]], ^[[MERGE:.*]]
+  // CHECK:        %[[CMP:.*]] = spirv.SLessThan %[[INDVAR]], %[[UB]] : i32
+  // CHECK:        spirv.BranchConditional %[[CMP]], ^[[BODY:.*]], ^[[MERGE:.*]]
   // CHECK:      ^[[BODY]]:
-  // CHECK:        %[[ZERO1:.*]] = spv.Constant 0 : i32
-  // CHECK:        %[[OFFSET1:.*]] = spv.Constant 0 : i32
-  // CHECK:        %[[STRIDE1:.*]] = spv.Constant 1 : i32
-  // CHECK:        %[[UPDATE1:.*]] = spv.IMul %[[STRIDE1]], %[[INDVAR]] : i32
-  // CHECK:        %[[INDEX1:.*]] = spv.IAdd %[[OFFSET1]], %[[UPDATE1]] : i32
-  // CHECK:        spv.AccessChain {{%.*}}{{\[}}%[[ZERO1]], %[[INDEX1]]{{\]}}
-  // CHECK:        %[[ZERO2:.*]] = spv.Constant 0 : i32
-  // CHECK:        %[[OFFSET2:.*]] = spv.Constant 0 : i32
-  // CHECK:        %[[STRIDE2:.*]] = spv.Constant 1 : i32
-  // CHECK:        %[[UPDATE2:.*]] = spv.IMul %[[STRIDE2]], %[[INDVAR]] : i32
-  // CHECK:        %[[INDEX2:.*]] = spv.IAdd %[[OFFSET2]], %[[UPDATE2]] : i32
-  // CHECK:        spv.AccessChain {{%.*}}[%[[ZERO2]], %[[INDEX2]]]
-  // CHECK:        %[[INCREMENT:.*]] = spv.IAdd %[[INDVAR]], %[[STEP]] : i32
-  // CHECK:        spv.Branch ^[[HEADER]](%[[INCREMENT]] : i32)
+  // CHECK:        %[[ZERO1:.*]] = spirv.Constant 0 : i32
+  // CHECK:        %[[OFFSET1:.*]] = spirv.Constant 0 : i32
+  // CHECK:        %[[STRIDE1:.*]] = spirv.Constant 1 : i32
+  // CHECK:        %[[UPDATE1:.*]] = spirv.IMul %[[STRIDE1]], %[[INDVAR]] : i32
+  // CHECK:        %[[INDEX1:.*]] = spirv.IAdd %[[OFFSET1]], %[[UPDATE1]] : i32
+  // CHECK:        spirv.AccessChain {{%.*}}{{\[}}%[[ZERO1]], %[[INDEX1]]{{\]}}
+  // CHECK:        %[[ZERO2:.*]] = spirv.Constant 0 : i32
+  // CHECK:        %[[OFFSET2:.*]] = spirv.Constant 0 : i32
+  // CHECK:        %[[STRIDE2:.*]] = spirv.Constant 1 : i32
+  // CHECK:        %[[UPDATE2:.*]] = spirv.IMul %[[STRIDE2]], %[[INDVAR]] : i32
+  // CHECK:        %[[INDEX2:.*]] = spirv.IAdd %[[OFFSET2]], %[[UPDATE2]] : i32
+  // CHECK:        spirv.AccessChain {{%.*}}[%[[ZERO2]], %[[INDEX2]]]
+  // CHECK:        %[[INCREMENT:.*]] = spirv.IAdd %[[INDVAR]], %[[STEP]] : i32
+  // CHECK:        spirv.Branch ^[[HEADER]](%[[INCREMENT]] : i32)
   // CHECK:      ^[[MERGE]]
-  // CHECK:        spv.mlir.merge
+  // CHECK:        spirv.mlir.merge
   // CHECK:      }
   scf.for %arg4 = %lb to %ub step %step {
-    %1 = memref.load %arg2[%arg4] : memref<10xf32, #spv.storage_class<StorageBuffer>>
-    memref.store %1, %arg3[%arg4] : memref<10xf32, #spv.storage_class<StorageBuffer>>
+    %1 = memref.load %arg2[%arg4] : memref<10xf32, #spirv.storage_class<StorageBuffer>>
+    memref.store %1, %arg3[%arg4] : memref<10xf32, #spirv.storage_class<StorageBuffer>>
   }
   return
 }
 
 // CHECK-LABEL: @loop_yield
-func.func @loop_yield(%arg2 : memref<10xf32, #spv.storage_class<StorageBuffer>>, %arg3 : memref<10xf32, #spv.storage_class<StorageBuffer>>) {
-  // CHECK: %[[LB:.*]] = spv.Constant 4 : i32
+func.func @loop_yield(%arg2 : memref<10xf32, #spirv.storage_class<StorageBuffer>>, %arg3 : memref<10xf32, #spirv.storage_class<StorageBuffer>>) {
+  // CHECK: %[[LB:.*]] = spirv.Constant 4 : i32
   %lb = arith.constant 4 : index
-  // CHECK: %[[UB:.*]] = spv.Constant 42 : i32
+  // CHECK: %[[UB:.*]] = spirv.Constant 42 : i32
   %ub = arith.constant 42 : index
-  // CHECK: %[[STEP:.*]] = spv.Constant 2 : i32
+  // CHECK: %[[STEP:.*]] = spirv.Constant 2 : i32
   %step = arith.constant 2 : index
-  // CHECK: %[[INITVAR1:.*]] = spv.Constant 0.000000e+00 : f32
+  // CHECK: %[[INITVAR1:.*]] = spirv.Constant 0.000000e+00 : f32
   %s0 = arith.constant 0.0 : f32
-  // CHECK: %[[INITVAR2:.*]] = spv.Constant 1.000000e+00 : f32
+  // CHECK: %[[INITVAR2:.*]] = spirv.Constant 1.000000e+00 : f32
   %s1 = arith.constant 1.0 : f32
-  // CHECK: %[[VAR1:.*]] = spv.Variable : !spv.ptr<f32, Function>
-  // CHECK: %[[VAR2:.*]] = spv.Variable : !spv.ptr<f32, Function>
-  // CHECK: spv.mlir.loop {
-  // CHECK:   spv.Branch ^[[HEADER:.*]](%[[LB]], %[[INITVAR1]], %[[INITVAR2]] : i32, f32, f32)
+  // CHECK: %[[VAR1:.*]] = spirv.Variable : !spirv.ptr<f32, Function>
+  // CHECK: %[[VAR2:.*]] = spirv.Variable : !spirv.ptr<f32, Function>
+  // CHECK: spirv.mlir.loop {
+  // CHECK:   spirv.Branch ^[[HEADER:.*]](%[[LB]], %[[INITVAR1]], %[[INITVAR2]] : i32, f32, f32)
   // CHECK: ^[[HEADER]](%[[INDVAR:.*]]: i32, %[[CARRIED1:.*]]: f32, %[[CARRIED2:.*]]: f32):
-  // CHECK:   %[[CMP:.*]] = spv.SLessThan %[[INDVAR]], %[[UB]] : i32
-  // CHECK:   spv.BranchConditional %[[CMP]], ^[[BODY:.*]], ^[[MERGE:.*]]
+  // CHECK:   %[[CMP:.*]] = spirv.SLessThan %[[INDVAR]], %[[UB]] : i32
+  // CHECK:   spirv.BranchConditional %[[CMP]], ^[[BODY:.*]], ^[[MERGE:.*]]
   // CHECK: ^[[BODY]]:
-  // CHECK:   %[[UPDATED:.*]] = spv.FAdd %[[CARRIED1]], %[[CARRIED1]] : f32
-  // CHECK-DAG:   %[[INCREMENT:.*]] = spv.IAdd %[[INDVAR]], %[[STEP]] : i32
-  // CHECK-DAG:   spv.Store "Function" %[[VAR1]], %[[UPDATED]] : f32
-  // CHECK-DAG:   spv.Store "Function" %[[VAR2]], %[[UPDATED]] : f32
-  // CHECK: spv.Branch ^[[HEADER]](%[[INCREMENT]], %[[UPDATED]], %[[UPDATED]] : i32, f32, f32)
+  // CHECK:   %[[UPDATED:.*]] = spirv.FAdd %[[CARRIED1]], %[[CARRIED1]] : f32
+  // CHECK-DAG:   %[[INCREMENT:.*]] = spirv.IAdd %[[INDVAR]], %[[STEP]] : i32
+  // CHECK-DAG:   spirv.Store "Function" %[[VAR1]], %[[UPDATED]] : f32
+  // CHECK-DAG:   spirv.Store "Function" %[[VAR2]], %[[UPDATED]] : f32
+  // CHECK: spirv.Branch ^[[HEADER]](%[[INCREMENT]], %[[UPDATED]], %[[UPDATED]] : i32, f32, f32)
   // CHECK: ^[[MERGE]]:
-  // CHECK:   spv.mlir.merge
+  // CHECK:   spirv.mlir.merge
   // CHECK: }
   %result:2 = scf.for %i0 = %lb to %ub step %step iter_args(%si = %s0, %sj = %s1) -> (f32, f32) {
     %sn = arith.addf %si, %si : f32
     scf.yield %sn, %sn : f32, f32
   }
-  // CHECK-DAG: %[[OUT1:.*]] = spv.Load "Function" %[[VAR1]] : f32
-  // CHECK-DAG: %[[OUT2:.*]] = spv.Load "Function" %[[VAR2]] : f32
-  // CHECK: spv.Store "StorageBuffer" {{%.*}}, %[[OUT1]] : f32
-  // CHECK: spv.Store "StorageBuffer" {{%.*}}, %[[OUT2]] : f32
-  memref.store %result#0, %arg3[%lb] : memref<10xf32, #spv.storage_class<StorageBuffer>>
-  memref.store %result#1, %arg3[%ub] : memref<10xf32, #spv.storage_class<StorageBuffer>>
+  // CHECK-DAG: %[[OUT1:.*]] = spirv.Load "Function" %[[VAR1]] : f32
+  // CHECK-DAG: %[[OUT2:.*]] = spirv.Load "Function" %[[VAR2]] : f32
+  // CHECK: spirv.Store "StorageBuffer" {{%.*}}, %[[OUT1]] : f32
+  // CHECK: spirv.Store "StorageBuffer" {{%.*}}, %[[OUT2]] : f32
+  memref.store %result#0, %arg3[%lb] : memref<10xf32, #spirv.storage_class<StorageBuffer>>
+  memref.store %result#1, %arg3[%ub] : memref<10xf32, #spirv.storage_class<StorageBuffer>>
   return
 }
 

diff  --git a/mlir/test/Conversion/SCFToSPIRV/if.mlir b/mlir/test/Conversion/SCFToSPIRV/if.mlir
index d8463b9db2be..3a2de3338c3f 100644
--- a/mlir/test/Conversion/SCFToSPIRV/if.mlir
+++ b/mlir/test/Conversion/SCFToSPIRV/if.mlir
@@ -1,110 +1,110 @@
 // RUN: mlir-opt -convert-scf-to-spirv %s -o - | FileCheck %s
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Shader], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @kernel_simple_selection
-func.func @kernel_simple_selection(%arg2 : memref<10xf32, #spv.storage_class<StorageBuffer>>, %arg3 : i1) {
+func.func @kernel_simple_selection(%arg2 : memref<10xf32, #spirv.storage_class<StorageBuffer>>, %arg3 : i1) {
   %value = arith.constant 0.0 : f32
   %i = arith.constant 0 : index
 
-  // CHECK:       spv.mlir.selection {
-  // CHECK-NEXT:    spv.BranchConditional {{%.*}}, [[TRUE:\^.*]], [[MERGE:\^.*]]
+  // CHECK:       spirv.mlir.selection {
+  // CHECK-NEXT:    spirv.BranchConditional {{%.*}}, [[TRUE:\^.*]], [[MERGE:\^.*]]
   // CHECK-NEXT:  [[TRUE]]:
-  // CHECK:         spv.Branch [[MERGE]]
+  // CHECK:         spirv.Branch [[MERGE]]
   // CHECK-NEXT:  [[MERGE]]:
-  // CHECK-NEXT:    spv.mlir.merge
+  // CHECK-NEXT:    spirv.mlir.merge
   // CHECK-NEXT:  }
-  // CHECK-NEXT:  spv.Return
+  // CHECK-NEXT:  spirv.Return
 
   scf.if %arg3 {
-    memref.store %value, %arg2[%i] : memref<10xf32, #spv.storage_class<StorageBuffer>>
+    memref.store %value, %arg2[%i] : memref<10xf32, #spirv.storage_class<StorageBuffer>>
   }
   return
 }
 
 // CHECK-LABEL: @kernel_nested_selection
-func.func @kernel_nested_selection(%arg3 : memref<10xf32, #spv.storage_class<StorageBuffer>>, %arg4 : memref<10xf32, #spv.storage_class<StorageBuffer>>, %arg5 : i1, %arg6 : i1) {
+func.func @kernel_nested_selection(%arg3 : memref<10xf32, #spirv.storage_class<StorageBuffer>>, %arg4 : memref<10xf32, #spirv.storage_class<StorageBuffer>>, %arg5 : i1, %arg6 : i1) {
   %i = arith.constant 0 : index
   %j = arith.constant 9 : index
 
-  // CHECK:       spv.mlir.selection {
-  // CHECK-NEXT:    spv.BranchConditional {{%.*}}, [[TRUE_TOP:\^.*]], [[FALSE_TOP:\^.*]]
+  // CHECK:       spirv.mlir.selection {
+  // CHECK-NEXT:    spirv.BranchConditional {{%.*}}, [[TRUE_TOP:\^.*]], [[FALSE_TOP:\^.*]]
   // CHECK-NEXT:  [[TRUE_TOP]]:
-  // CHECK-NEXT:    spv.mlir.selection {
-  // CHECK-NEXT:      spv.BranchConditional {{%.*}}, [[TRUE_NESTED_TRUE_PATH:\^.*]], [[FALSE_NESTED_TRUE_PATH:\^.*]]
+  // CHECK-NEXT:    spirv.mlir.selection {
+  // CHECK-NEXT:      spirv.BranchConditional {{%.*}}, [[TRUE_NESTED_TRUE_PATH:\^.*]], [[FALSE_NESTED_TRUE_PATH:\^.*]]
   // CHECK-NEXT:    [[TRUE_NESTED_TRUE_PATH]]:
-  // CHECK:           spv.Branch [[MERGE_NESTED_TRUE_PATH:\^.*]]
+  // CHECK:           spirv.Branch [[MERGE_NESTED_TRUE_PATH:\^.*]]
   // CHECK-NEXT:    [[FALSE_NESTED_TRUE_PATH]]:
-  // CHECK:           spv.Branch [[MERGE_NESTED_TRUE_PATH]]
+  // CHECK:           spirv.Branch [[MERGE_NESTED_TRUE_PATH]]
   // CHECK-NEXT:    [[MERGE_NESTED_TRUE_PATH]]:
-  // CHECK-NEXT:      spv.mlir.merge
+  // CHECK-NEXT:      spirv.mlir.merge
   // CHECK-NEXT:    }
-  // CHECK-NEXT:    spv.Branch [[MERGE_TOP:\^.*]]
+  // CHECK-NEXT:    spirv.Branch [[MERGE_TOP:\^.*]]
   // CHECK-NEXT:  [[FALSE_TOP]]:
-  // CHECK-NEXT:    spv.mlir.selection {
-  // CHECK-NEXT:      spv.BranchConditional {{%.*}}, [[TRUE_NESTED_FALSE_PATH:\^.*]], [[FALSE_NESTED_FALSE_PATH:\^.*]]
+  // CHECK-NEXT:    spirv.mlir.selection {
+  // CHECK-NEXT:      spirv.BranchConditional {{%.*}}, [[TRUE_NESTED_FALSE_PATH:\^.*]], [[FALSE_NESTED_FALSE_PATH:\^.*]]
   // CHECK-NEXT:    [[TRUE_NESTED_FALSE_PATH]]:
-  // CHECK:           spv.Branch [[MERGE_NESTED_FALSE_PATH:\^.*]]
+  // CHECK:           spirv.Branch [[MERGE_NESTED_FALSE_PATH:\^.*]]
   // CHECK-NEXT:    [[FALSE_NESTED_FALSE_PATH]]:
-  // CHECK:           spv.Branch [[MERGE_NESTED_FALSE_PATH]]
+  // CHECK:           spirv.Branch [[MERGE_NESTED_FALSE_PATH]]
   // CHECK:         [[MERGE_NESTED_FALSE_PATH]]:
-  // CHECK-NEXT:      spv.mlir.merge
+  // CHECK-NEXT:      spirv.mlir.merge
   // CHECK-NEXT:    }
-  // CHECK-NEXT:    spv.Branch [[MERGE_TOP]]
+  // CHECK-NEXT:    spirv.Branch [[MERGE_TOP]]
   // CHECK-NEXT:  [[MERGE_TOP]]:
-  // CHECK-NEXT:    spv.mlir.merge
+  // CHECK-NEXT:    spirv.mlir.merge
   // CHECK-NEXT:  }
-  // CHECK-NEXT:  spv.Return
+  // CHECK-NEXT:  spirv.Return
 
   scf.if %arg5 {
     scf.if %arg6 {
-      %value = memref.load %arg3[%i] : memref<10xf32, #spv.storage_class<StorageBuffer>>
-      memref.store %value, %arg4[%i] : memref<10xf32, #spv.storage_class<StorageBuffer>>
+      %value = memref.load %arg3[%i] : memref<10xf32, #spirv.storage_class<StorageBuffer>>
+      memref.store %value, %arg4[%i] : memref<10xf32, #spirv.storage_class<StorageBuffer>>
     } else {
-      %value = memref.load %arg4[%i] : memref<10xf32, #spv.storage_class<StorageBuffer>>
-      memref.store %value, %arg3[%i] : memref<10xf32, #spv.storage_class<StorageBuffer>>
+      %value = memref.load %arg4[%i] : memref<10xf32, #spirv.storage_class<StorageBuffer>>
+      memref.store %value, %arg3[%i] : memref<10xf32, #spirv.storage_class<StorageBuffer>>
     }
   } else {
     scf.if %arg6 {
-      %value = memref.load %arg3[%j] : memref<10xf32, #spv.storage_class<StorageBuffer>>
-      memref.store %value, %arg4[%j] : memref<10xf32, #spv.storage_class<StorageBuffer>>
+      %value = memref.load %arg3[%j] : memref<10xf32, #spirv.storage_class<StorageBuffer>>
+      memref.store %value, %arg4[%j] : memref<10xf32, #spirv.storage_class<StorageBuffer>>
     } else {
-      %value = memref.load %arg4[%j] : memref<10xf32, #spv.storage_class<StorageBuffer>>
-      memref.store %value, %arg3[%j] : memref<10xf32, #spv.storage_class<StorageBuffer>>
+      %value = memref.load %arg4[%j] : memref<10xf32, #spirv.storage_class<StorageBuffer>>
+      memref.store %value, %arg3[%j] : memref<10xf32, #spirv.storage_class<StorageBuffer>>
     }
   }
   return
 }
 
 // CHECK-LABEL: @simple_if_yield
-func.func @simple_if_yield(%arg2 : memref<10xf32, #spv.storage_class<StorageBuffer>>, %arg3 : i1) {
-  // CHECK: %[[VAR1:.*]] = spv.Variable : !spv.ptr<f32, Function>
-  // CHECK: %[[VAR2:.*]] = spv.Variable : !spv.ptr<f32, Function>
-  // CHECK:       spv.mlir.selection {
-  // CHECK-NEXT:    spv.BranchConditional {{%.*}}, [[TRUE:\^.*]], [[FALSE:\^.*]]
+func.func @simple_if_yield(%arg2 : memref<10xf32, #spirv.storage_class<StorageBuffer>>, %arg3 : i1) {
+  // CHECK: %[[VAR1:.*]] = spirv.Variable : !spirv.ptr<f32, Function>
+  // CHECK: %[[VAR2:.*]] = spirv.Variable : !spirv.ptr<f32, Function>
+  // CHECK:       spirv.mlir.selection {
+  // CHECK-NEXT:    spirv.BranchConditional {{%.*}}, [[TRUE:\^.*]], [[FALSE:\^.*]]
   // CHECK-NEXT:  [[TRUE]]:
-  // CHECK:         %[[RET1TRUE:.*]] = spv.Constant 0.000000e+00 : f32
-  // CHECK:         %[[RET2TRUE:.*]] = spv.Constant 1.000000e+00 : f32
-  // CHECK-DAG:     spv.Store "Function" %[[VAR1]], %[[RET1TRUE]] : f32
-  // CHECK-DAG:     spv.Store "Function" %[[VAR2]], %[[RET2TRUE]] : f32
-  // CHECK:         spv.Branch ^[[MERGE:.*]]
+  // CHECK:         %[[RET1TRUE:.*]] = spirv.Constant 0.000000e+00 : f32
+  // CHECK:         %[[RET2TRUE:.*]] = spirv.Constant 1.000000e+00 : f32
+  // CHECK-DAG:     spirv.Store "Function" %[[VAR1]], %[[RET1TRUE]] : f32
+  // CHECK-DAG:     spirv.Store "Function" %[[VAR2]], %[[RET2TRUE]] : f32
+  // CHECK:         spirv.Branch ^[[MERGE:.*]]
   // CHECK-NEXT:  [[FALSE]]:
-  // CHECK:         %[[RET2FALSE:.*]] = spv.Constant 2.000000e+00 : f32
-  // CHECK:         %[[RET1FALSE:.*]] = spv.Constant 3.000000e+00 : f32
-  // CHECK-DAG:     spv.Store "Function" %[[VAR1]], %[[RET1FALSE]] : f32
-  // CHECK-DAG:     spv.Store "Function" %[[VAR2]], %[[RET2FALSE]] : f32
-  // CHECK:         spv.Branch ^[[MERGE]]
+  // CHECK:         %[[RET2FALSE:.*]] = spirv.Constant 2.000000e+00 : f32
+  // CHECK:         %[[RET1FALSE:.*]] = spirv.Constant 3.000000e+00 : f32
+  // CHECK-DAG:     spirv.Store "Function" %[[VAR1]], %[[RET1FALSE]] : f32
+  // CHECK-DAG:     spirv.Store "Function" %[[VAR2]], %[[RET2FALSE]] : f32
+  // CHECK:         spirv.Branch ^[[MERGE]]
   // CHECK-NEXT:  ^[[MERGE]]:
-  // CHECK:         spv.mlir.merge
+  // CHECK:         spirv.mlir.merge
   // CHECK-NEXT:  }
-  // CHECK-DAG:   %[[OUT1:.*]] = spv.Load "Function" %[[VAR1]] : f32
-  // CHECK-DAG:   %[[OUT2:.*]] = spv.Load "Function" %[[VAR2]] : f32
-  // CHECK:       spv.Store "StorageBuffer" {{%.*}}, %[[OUT1]] : f32
-  // CHECK:       spv.Store "StorageBuffer" {{%.*}}, %[[OUT2]] : f32
-  // CHECK:       spv.Return
+  // CHECK-DAG:   %[[OUT1:.*]] = spirv.Load "Function" %[[VAR1]] : f32
+  // CHECK-DAG:   %[[OUT2:.*]] = spirv.Load "Function" %[[VAR2]] : f32
+  // CHECK:       spirv.Store "StorageBuffer" {{%.*}}, %[[OUT1]] : f32
+  // CHECK:       spirv.Store "StorageBuffer" {{%.*}}, %[[OUT2]] : f32
+  // CHECK:       spirv.Return
   %0:2 = scf.if %arg3 -> (f32, f32) {
     %c0 = arith.constant 0.0 : f32
     %c1 = arith.constant 1.0 : f32
@@ -116,40 +116,40 @@ func.func @simple_if_yield(%arg2 : memref<10xf32, #spv.storage_class<StorageBuff
   }
   %i = arith.constant 0 : index
   %j = arith.constant 1 : index
-  memref.store %0#0, %arg2[%i] : memref<10xf32, #spv.storage_class<StorageBuffer>>
-  memref.store %0#1, %arg2[%j] : memref<10xf32, #spv.storage_class<StorageBuffer>>
+  memref.store %0#0, %arg2[%i] : memref<10xf32, #spirv.storage_class<StorageBuffer>>
+  memref.store %0#1, %arg2[%j] : memref<10xf32, #spirv.storage_class<StorageBuffer>>
   return
 }
 
 // TODO: The transformation should only be legal if VariablePointer capability
 // is supported. This test is still useful to make sure we can handle scf op
 // result with type change.
-func.func @simple_if_yield_type_change(%arg2 : memref<10xf32, #spv.storage_class<StorageBuffer>>, %arg3 : memref<10xf32, #spv.storage_class<StorageBuffer>>, %arg4 : i1) {
+func.func @simple_if_yield_type_change(%arg2 : memref<10xf32, #spirv.storage_class<StorageBuffer>>, %arg3 : memref<10xf32, #spirv.storage_class<StorageBuffer>>, %arg4 : i1) {
   // CHECK-LABEL: @simple_if_yield_type_change
-  // CHECK:       %[[VAR:.*]] = spv.Variable : !spv.ptr<!spv.ptr<!spv.struct<(!spv.array<10 x f32, stride=4> [0])>, StorageBuffer>, Function>
-  // CHECK:       spv.mlir.selection {
-  // CHECK-NEXT:    spv.BranchConditional {{%.*}}, [[TRUE:\^.*]], [[FALSE:\^.*]]
+  // CHECK:       %[[VAR:.*]] = spirv.Variable : !spirv.ptr<!spirv.ptr<!spirv.struct<(!spirv.array<10 x f32, stride=4> [0])>, StorageBuffer>, Function>
+  // CHECK:       spirv.mlir.selection {
+  // CHECK-NEXT:    spirv.BranchConditional {{%.*}}, [[TRUE:\^.*]], [[FALSE:\^.*]]
   // CHECK-NEXT:  [[TRUE]]:
-  // CHECK:         spv.Store "Function" %[[VAR]], {{%.*}} : !spv.ptr<!spv.struct<(!spv.array<10 x f32, stride=4> [0])>, StorageBuffer>
-  // CHECK:         spv.Branch ^[[MERGE:.*]]
+  // CHECK:         spirv.Store "Function" %[[VAR]], {{%.*}} : !spirv.ptr<!spirv.struct<(!spirv.array<10 x f32, stride=4> [0])>, StorageBuffer>
+  // CHECK:         spirv.Branch ^[[MERGE:.*]]
   // CHECK-NEXT:  [[FALSE]]:
-  // CHECK:         spv.Store "Function" %[[VAR]], {{%.*}} : !spv.ptr<!spv.struct<(!spv.array<10 x f32, stride=4> [0])>, StorageBuffer>
-  // CHECK:         spv.Branch ^[[MERGE]]
+  // CHECK:         spirv.Store "Function" %[[VAR]], {{%.*}} : !spirv.ptr<!spirv.struct<(!spirv.array<10 x f32, stride=4> [0])>, StorageBuffer>
+  // CHECK:         spirv.Branch ^[[MERGE]]
   // CHECK-NEXT:  ^[[MERGE]]:
-  // CHECK:         spv.mlir.merge
+  // CHECK:         spirv.mlir.merge
   // CHECK-NEXT:  }
-  // CHECK:       %[[OUT:.*]] = spv.Load "Function" %[[VAR]] : !spv.ptr<!spv.struct<(!spv.array<10 x f32, stride=4> [0])>, StorageBuffer>
-  // CHECK:       %[[ADD:.*]] = spv.AccessChain %[[OUT]][{{%.*}}, {{%.*}}] : !spv.ptr<!spv.struct<(!spv.array<10 x f32, stride=4> [0])>, StorageBuffer>
-  // CHECK:       spv.Store "StorageBuffer" %[[ADD]], {{%.*}} : f32
-  // CHECK:       spv.Return
+  // CHECK:       %[[OUT:.*]] = spirv.Load "Function" %[[VAR]] : !spirv.ptr<!spirv.struct<(!spirv.array<10 x f32, stride=4> [0])>, StorageBuffer>
+  // CHECK:       %[[ADD:.*]] = spirv.AccessChain %[[OUT]][{{%.*}}, {{%.*}}] : !spirv.ptr<!spirv.struct<(!spirv.array<10 x f32, stride=4> [0])>, StorageBuffer>
+  // CHECK:       spirv.Store "StorageBuffer" %[[ADD]], {{%.*}} : f32
+  // CHECK:       spirv.Return
   %i = arith.constant 0 : index
   %value = arith.constant 0.0 : f32
-  %0 = scf.if %arg4 -> (memref<10xf32, #spv.storage_class<StorageBuffer>>) {
-    scf.yield %arg2 : memref<10xf32, #spv.storage_class<StorageBuffer>>
+  %0 = scf.if %arg4 -> (memref<10xf32, #spirv.storage_class<StorageBuffer>>) {
+    scf.yield %arg2 : memref<10xf32, #spirv.storage_class<StorageBuffer>>
   } else {
-    scf.yield %arg3 : memref<10xf32, #spv.storage_class<StorageBuffer>>
+    scf.yield %arg3 : memref<10xf32, #spirv.storage_class<StorageBuffer>>
   }
-  memref.store %value, %0[%i] : memref<10xf32, #spv.storage_class<StorageBuffer>>
+  memref.store %value, %0[%i] : memref<10xf32, #spirv.storage_class<StorageBuffer>>
   return
 }
 

diff  --git a/mlir/test/Conversion/SCFToSPIRV/while.mlir b/mlir/test/Conversion/SCFToSPIRV/while.mlir
index ac3b18a35efd..a7e07a708603 100644
--- a/mlir/test/Conversion/SCFToSPIRV/while.mlir
+++ b/mlir/test/Conversion/SCFToSPIRV/while.mlir
@@ -1,26 +1,26 @@
 // RUN: mlir-opt -allow-unregistered-dialect -convert-scf-to-spirv %s -o - | FileCheck %s
 
 module attributes {
-  spv.target_env = #spv.target_env<
-    #spv.vce<v1.0, [Shader, Int64], [SPV_KHR_storage_buffer_storage_class]>, #spv.resource_limits<>>
+  spirv.target_env = #spirv.target_env<
+    #spirv.vce<v1.0, [Shader, Int64], [SPV_KHR_storage_buffer_storage_class]>, #spirv.resource_limits<>>
 } {
 
 // CHECK-LABEL: @while_loop1
 func.func @while_loop1(%arg0: i32, %arg1: i32) -> i32 {
   // CHECK-SAME: (%[[ARG1:.*]]: i32, %[[ARG2:.*]]: i32)
-  // CHECK: %[[INITVAR:.*]] = spv.Constant 2 : i32
-  // CHECK: %[[VAR1:.*]] = spv.Variable : !spv.ptr<i32, Function>
-  // CHECK: spv.mlir.loop {
-  // CHECK:   spv.Branch ^[[HEADER:.*]](%[[ARG1]] : i32)
+  // CHECK: %[[INITVAR:.*]] = spirv.Constant 2 : i32
+  // CHECK: %[[VAR1:.*]] = spirv.Variable : !spirv.ptr<i32, Function>
+  // CHECK: spirv.mlir.loop {
+  // CHECK:   spirv.Branch ^[[HEADER:.*]](%[[ARG1]] : i32)
   // CHECK: ^[[HEADER]](%[[INDVAR1:.*]]: i32):
-  // CHECK:   %[[CMP:.*]] = spv.SLessThan %[[INDVAR1]], %[[ARG2]] : i32
-  // CHECK:   spv.Store "Function" %[[VAR1]], %[[INDVAR1]] : i32
-  // CHECK:   spv.BranchConditional %[[CMP]], ^[[BODY:.*]](%[[INDVAR1]] : i32), ^[[MERGE:.*]]
+  // CHECK:   %[[CMP:.*]] = spirv.SLessThan %[[INDVAR1]], %[[ARG2]] : i32
+  // CHECK:   spirv.Store "Function" %[[VAR1]], %[[INDVAR1]] : i32
+  // CHECK:   spirv.BranchConditional %[[CMP]], ^[[BODY:.*]](%[[INDVAR1]] : i32), ^[[MERGE:.*]]
   // CHECK: ^[[BODY]](%[[INDVAR2:.*]]: i32):
-  // CHECK:   %[[UPDATED:.*]] = spv.IMul %[[INDVAR2]], %[[INITVAR]] : i32
-  // CHECK: spv.Branch ^[[HEADER]](%[[UPDATED]] : i32)
+  // CHECK:   %[[UPDATED:.*]] = spirv.IMul %[[INDVAR2]], %[[INITVAR]] : i32
+  // CHECK: spirv.Branch ^[[HEADER]](%[[UPDATED]] : i32)
   // CHECK: ^[[MERGE]]:
-  // CHECK:   spv.mlir.merge
+  // CHECK:   spirv.mlir.merge
   // CHECK: }
   %c2_i32 = arith.constant 2 : i32
   %0 = scf.while (%arg3 = %arg0) : (i32) -> (i32) {
@@ -31,8 +31,8 @@ func.func @while_loop1(%arg0: i32, %arg1: i32) -> i32 {
     %1 = arith.muli %arg5, %c2_i32 : i32
     scf.yield %1 : i32
   }
-  // CHECK: %[[OUT:.*]] = spv.Load "Function" %[[VAR1]] : i32
-  // CHECK: spv.ReturnValue %[[OUT]] : i32
+  // CHECK: %[[OUT:.*]] = spirv.Load "Function" %[[VAR1]] : i32
+  // CHECK: spirv.ReturnValue %[[OUT]] : i32
   return %0 : i32
 }
 
@@ -41,19 +41,19 @@ func.func @while_loop1(%arg0: i32, %arg1: i32) -> i32 {
 // CHECK-LABEL: @while_loop2
 func.func @while_loop2(%arg0: f32) -> i64 {
   // CHECK-SAME: (%[[ARG:.*]]: f32)
-  // CHECK: %[[VAR:.*]] = spv.Variable : !spv.ptr<i64, Function>
-  // CHECK: spv.mlir.loop {
-  // CHECK:   spv.Branch ^[[HEADER:.*]](%[[ARG]] : f32)
+  // CHECK: %[[VAR:.*]] = spirv.Variable : !spirv.ptr<i64, Function>
+  // CHECK: spirv.mlir.loop {
+  // CHECK:   spirv.Branch ^[[HEADER:.*]](%[[ARG]] : f32)
   // CHECK: ^[[HEADER]](%[[INDVAR1:.*]]: f32):
   // CHECK:   %[[SHARED:.*]] = "foo.shared_compute"(%[[INDVAR1]]) : (f32) -> i64
   // CHECK:   %[[CMP:.*]] = "foo.evaluate_condition"(%[[INDVAR1]], %[[SHARED]]) : (f32, i64) -> i1
-  // CHECK:   spv.Store "Function" %[[VAR]], %[[SHARED]] : i64
-  // CHECK:   spv.BranchConditional %[[CMP]], ^[[BODY:.*]](%[[SHARED]] : i64), ^[[MERGE:.*]]
+  // CHECK:   spirv.Store "Function" %[[VAR]], %[[SHARED]] : i64
+  // CHECK:   spirv.BranchConditional %[[CMP]], ^[[BODY:.*]](%[[SHARED]] : i64), ^[[MERGE:.*]]
   // CHECK: ^[[BODY]](%[[INDVAR2:.*]]: i64):
   // CHECK:   %[[UPDATED:.*]] = "foo.payload"(%[[INDVAR2]]) : (i64) -> f32
-  // CHECK: spv.Branch ^[[HEADER]](%[[UPDATED]] : f32)
+  // CHECK: spirv.Branch ^[[HEADER]](%[[UPDATED]] : f32)
   // CHECK: ^[[MERGE]]:
-  // CHECK:   spv.mlir.merge
+  // CHECK:   spirv.mlir.merge
   // CHECK: }
   %res = scf.while (%arg1 = %arg0) : (f32) -> i64 {
     %shared = "foo.shared_compute"(%arg1) : (f32) -> i64
@@ -64,8 +64,8 @@ func.func @while_loop2(%arg0: f32) -> i64 {
     %res = "foo.payload"(%arg2) : (i64) -> f32
     scf.yield %res : f32
   }
-  // CHECK: %[[OUT:.*]] = spv.Load "Function" %[[VAR]] : i64
-  // CHECK: spv.ReturnValue %[[OUT]] : i64
+  // CHECK: %[[OUT:.*]] = spirv.Load "Function" %[[VAR]] : i64
+  // CHECK: spirv.ReturnValue %[[OUT]] : i64
   return %res : i64
 }
 

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir
index 5a1a3eb6209b..dbbf8610afb4 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/arithmetic-ops-to-llvm.mlir
@@ -1,235 +1,235 @@
 // RUN: mlir-opt -convert-spirv-to-llvm %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.IAdd
+// spirv.IAdd
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @iadd_scalar
-spv.func @iadd_scalar(%arg0: i32, %arg1: i32) "None" {
+spirv.func @iadd_scalar(%arg0: i32, %arg1: i32) "None" {
   // CHECK: llvm.add %{{.*}}, %{{.*}} : i32
-  %0 = spv.IAdd %arg0, %arg1 : i32
-  spv.Return
+  %0 = spirv.IAdd %arg0, %arg1 : i32
+  spirv.Return
 }
 
 // CHECK-LABEL: @iadd_vector
-spv.func @iadd_vector(%arg0: vector<4xi64>, %arg1: vector<4xi64>) "None" {
+spirv.func @iadd_vector(%arg0: vector<4xi64>, %arg1: vector<4xi64>) "None" {
   // CHECK: llvm.add %{{.*}}, %{{.*}} : vector<4xi64>
-  %0 = spv.IAdd %arg0, %arg1 : vector<4xi64>
-  spv.Return
+  %0 = spirv.IAdd %arg0, %arg1 : vector<4xi64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ISub
+// spirv.ISub
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @isub_scalar
-spv.func @isub_scalar(%arg0: i8, %arg1: i8) "None" {
+spirv.func @isub_scalar(%arg0: i8, %arg1: i8) "None" {
   // CHECK: llvm.sub %{{.*}}, %{{.*}} : i8
-  %0 = spv.ISub %arg0, %arg1 : i8
-  spv.Return
+  %0 = spirv.ISub %arg0, %arg1 : i8
+  spirv.Return
 }
 
 // CHECK-LABEL: @isub_vector
-spv.func @isub_vector(%arg0: vector<2xi16>, %arg1: vector<2xi16>) "None" {
+spirv.func @isub_vector(%arg0: vector<2xi16>, %arg1: vector<2xi16>) "None" {
   // CHECK: llvm.sub %{{.*}}, %{{.*}} : vector<2xi16>
-  %0 = spv.ISub %arg0, %arg1 : vector<2xi16>
-  spv.Return
+  %0 = spirv.ISub %arg0, %arg1 : vector<2xi16>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.IMul
+// spirv.IMul
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @imul_scalar
-spv.func @imul_scalar(%arg0: i32, %arg1: i32) "None" {
+spirv.func @imul_scalar(%arg0: i32, %arg1: i32) "None" {
   // CHECK: llvm.mul %{{.*}}, %{{.*}} : i32
-  %0 = spv.IMul %arg0, %arg1 : i32
-  spv.Return
+  %0 = spirv.IMul %arg0, %arg1 : i32
+  spirv.Return
 }
 
 // CHECK-LABEL: @imul_vector
-spv.func @imul_vector(%arg0: vector<3xi32>, %arg1: vector<3xi32>) "None" {
+spirv.func @imul_vector(%arg0: vector<3xi32>, %arg1: vector<3xi32>) "None" {
   // CHECK: llvm.mul %{{.*}}, %{{.*}} : vector<3xi32>
-  %0 = spv.IMul %arg0, %arg1 : vector<3xi32>
-  spv.Return
+  %0 = spirv.IMul %arg0, %arg1 : vector<3xi32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FAdd
+// spirv.FAdd
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @fadd_scalar
-spv.func @fadd_scalar(%arg0: f16, %arg1: f16) "None" {
+spirv.func @fadd_scalar(%arg0: f16, %arg1: f16) "None" {
   // CHECK: llvm.fadd %{{.*}}, %{{.*}} : f16
-  %0 = spv.FAdd %arg0, %arg1 : f16
-  spv.Return
+  %0 = spirv.FAdd %arg0, %arg1 : f16
+  spirv.Return
 }
 
 // CHECK-LABEL: @fadd_vector
-spv.func @fadd_vector(%arg0: vector<4xf32>, %arg1: vector<4xf32>) "None" {
+spirv.func @fadd_vector(%arg0: vector<4xf32>, %arg1: vector<4xf32>) "None" {
   // CHECK: llvm.fadd %{{.*}}, %{{.*}} : vector<4xf32>
-  %0 = spv.FAdd %arg0, %arg1 : vector<4xf32>
-  spv.Return
+  %0 = spirv.FAdd %arg0, %arg1 : vector<4xf32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FSub
+// spirv.FSub
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @fsub_scalar
-spv.func @fsub_scalar(%arg0: f32, %arg1: f32) "None" {
+spirv.func @fsub_scalar(%arg0: f32, %arg1: f32) "None" {
   // CHECK: llvm.fsub %{{.*}}, %{{.*}} : f32
-  %0 = spv.FSub %arg0, %arg1 : f32
-  spv.Return
+  %0 = spirv.FSub %arg0, %arg1 : f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @fsub_vector
-spv.func @fsub_vector(%arg0: vector<2xf32>, %arg1: vector<2xf32>) "None" {
+spirv.func @fsub_vector(%arg0: vector<2xf32>, %arg1: vector<2xf32>) "None" {
   // CHECK: llvm.fsub %{{.*}}, %{{.*}} : vector<2xf32>
-  %0 = spv.FSub %arg0, %arg1 : vector<2xf32>
-  spv.Return
+  %0 = spirv.FSub %arg0, %arg1 : vector<2xf32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FDiv
+// spirv.FDiv
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @fdiv_scalar
-spv.func @fdiv_scalar(%arg0: f32, %arg1: f32) "None" {
+spirv.func @fdiv_scalar(%arg0: f32, %arg1: f32) "None" {
   // CHECK: llvm.fdiv %{{.*}}, %{{.*}} : f32
-  %0 = spv.FDiv %arg0, %arg1 : f32
-  spv.Return
+  %0 = spirv.FDiv %arg0, %arg1 : f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @fdiv_vector
-spv.func @fdiv_vector(%arg0: vector<3xf64>, %arg1: vector<3xf64>) "None" {
+spirv.func @fdiv_vector(%arg0: vector<3xf64>, %arg1: vector<3xf64>) "None" {
   // CHECK: llvm.fdiv %{{.*}}, %{{.*}} : vector<3xf64>
-  %0 = spv.FDiv %arg0, %arg1 : vector<3xf64>
-  spv.Return
+  %0 = spirv.FDiv %arg0, %arg1 : vector<3xf64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FMul
+// spirv.FMul
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @fmul_scalar
-spv.func @fmul_scalar(%arg0: f32, %arg1: f32) "None" {
+spirv.func @fmul_scalar(%arg0: f32, %arg1: f32) "None" {
   // CHECK: llvm.fmul %{{.*}}, %{{.*}} : f32
-  %0 = spv.FMul %arg0, %arg1 : f32
-  spv.Return
+  %0 = spirv.FMul %arg0, %arg1 : f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @fmul_vector
-spv.func @fmul_vector(%arg0: vector<2xf32>, %arg1: vector<2xf32>) "None" {
+spirv.func @fmul_vector(%arg0: vector<2xf32>, %arg1: vector<2xf32>) "None" {
   // CHECK: llvm.fmul %{{.*}}, %{{.*}} : vector<2xf32>
-  %0 = spv.FMul %arg0, %arg1 : vector<2xf32>
-  spv.Return
+  %0 = spirv.FMul %arg0, %arg1 : vector<2xf32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FRem
+// spirv.FRem
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @frem_scalar
-spv.func @frem_scalar(%arg0: f32, %arg1: f32) "None" {
+spirv.func @frem_scalar(%arg0: f32, %arg1: f32) "None" {
   // CHECK: llvm.frem %{{.*}}, %{{.*}} : f32
-  %0 = spv.FRem %arg0, %arg1 : f32
-  spv.Return
+  %0 = spirv.FRem %arg0, %arg1 : f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @frem_vector
-spv.func @frem_vector(%arg0: vector<3xf64>, %arg1: vector<3xf64>) "None" {
+spirv.func @frem_vector(%arg0: vector<3xf64>, %arg1: vector<3xf64>) "None" {
   // CHECK: llvm.frem %{{.*}}, %{{.*}} : vector<3xf64>
-  %0 = spv.FRem %arg0, %arg1 : vector<3xf64>
-  spv.Return
+  %0 = spirv.FRem %arg0, %arg1 : vector<3xf64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FNegate
+// spirv.FNegate
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @fneg_scalar
-spv.func @fneg_scalar(%arg: f64) "None" {
+spirv.func @fneg_scalar(%arg: f64) "None" {
   // CHECK: llvm.fneg %{{.*}} : f64
-  %0 = spv.FNegate %arg : f64
-  spv.Return
+  %0 = spirv.FNegate %arg : f64
+  spirv.Return
 }
 
 // CHECK-LABEL: @fneg_vector
-spv.func @fneg_vector(%arg: vector<2xf32>) "None" {
+spirv.func @fneg_vector(%arg: vector<2xf32>) "None" {
   // CHECK: llvm.fneg %{{.*}} : vector<2xf32>
-  %0 = spv.FNegate %arg : vector<2xf32>
-  spv.Return
+  %0 = spirv.FNegate %arg : vector<2xf32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.UDiv
+// spirv.UDiv
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @udiv_scalar
-spv.func @udiv_scalar(%arg0: i32, %arg1: i32) "None" {
+spirv.func @udiv_scalar(%arg0: i32, %arg1: i32) "None" {
   // CHECK: llvm.udiv %{{.*}}, %{{.*}} : i32
-  %0 = spv.UDiv %arg0, %arg1 : i32
-  spv.Return
+  %0 = spirv.UDiv %arg0, %arg1 : i32
+  spirv.Return
 }
 
 // CHECK-LABEL: @udiv_vector
-spv.func @udiv_vector(%arg0: vector<3xi64>, %arg1: vector<3xi64>) "None" {
+spirv.func @udiv_vector(%arg0: vector<3xi64>, %arg1: vector<3xi64>) "None" {
   // CHECK: llvm.udiv %{{.*}}, %{{.*}} : vector<3xi64>
-  %0 = spv.UDiv %arg0, %arg1 : vector<3xi64>
-  spv.Return
+  %0 = spirv.UDiv %arg0, %arg1 : vector<3xi64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.UMod
+// spirv.UMod
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @umod_scalar
-spv.func @umod_scalar(%arg0: i32, %arg1: i32) "None" {
+spirv.func @umod_scalar(%arg0: i32, %arg1: i32) "None" {
   // CHECK: llvm.urem %{{.*}}, %{{.*}} : i32
-  %0 = spv.UMod %arg0, %arg1 : i32
-  spv.Return
+  %0 = spirv.UMod %arg0, %arg1 : i32
+  spirv.Return
 }
 
 // CHECK-LABEL: @umod_vector
-spv.func @umod_vector(%arg0: vector<3xi64>, %arg1: vector<3xi64>) "None" {
+spirv.func @umod_vector(%arg0: vector<3xi64>, %arg1: vector<3xi64>) "None" {
   // CHECK: llvm.urem %{{.*}}, %{{.*}} : vector<3xi64>
-  %0 = spv.UMod %arg0, %arg1 : vector<3xi64>
-  spv.Return
+  %0 = spirv.UMod %arg0, %arg1 : vector<3xi64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.SDiv
+// spirv.SDiv
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @sdiv_scalar
-spv.func @sdiv_scalar(%arg0: i16, %arg1: i16) "None" {
+spirv.func @sdiv_scalar(%arg0: i16, %arg1: i16) "None" {
   // CHECK: llvm.sdiv %{{.*}}, %{{.*}} : i16
-  %0 = spv.SDiv %arg0, %arg1 : i16
-  spv.Return
+  %0 = spirv.SDiv %arg0, %arg1 : i16
+  spirv.Return
 }
 
 // CHECK-LABEL: @sdiv_vector
-spv.func @sdiv_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
+spirv.func @sdiv_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
   // CHECK: llvm.sdiv %{{.*}}, %{{.*}} : vector<2xi64>
-  %0 = spv.SDiv %arg0, %arg1 : vector<2xi64>
-  spv.Return
+  %0 = spirv.SDiv %arg0, %arg1 : vector<2xi64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.SRem
+// spirv.SRem
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @srem_scalar
-spv.func @srem_scalar(%arg0: i32, %arg1: i32) "None" {
+spirv.func @srem_scalar(%arg0: i32, %arg1: i32) "None" {
   // CHECK: llvm.srem %{{.*}}, %{{.*}} : i32
-  %0 = spv.SRem %arg0, %arg1 : i32
-  spv.Return
+  %0 = spirv.SRem %arg0, %arg1 : i32
+  spirv.Return
 }
 
 // CHECK-LABEL: @srem_vector
-spv.func @srem_vector(%arg0: vector<4xi32>, %arg1: vector<4xi32>) "None" {
+spirv.func @srem_vector(%arg0: vector<4xi32>, %arg1: vector<4xi32>) "None" {
   // CHECK: llvm.srem %{{.*}}, %{{.*}} : vector<4xi32>
-  %0 = spv.SRem %arg0, %arg1 : vector<4xi32>
-  spv.Return
+  %0 = spirv.SRem %arg0, %arg1 : vector<4xi32>
+  spirv.Return
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir
index 488dd7ea37d8..7c25cf579f0d 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/bitwise-ops-to-llvm.mlir
@@ -1,48 +1,48 @@
 // RUN: mlir-opt -convert-spirv-to-llvm %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.BitCount
+// spirv.BitCount
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @bitcount_scalar
-spv.func @bitcount_scalar(%arg0: i16) "None" {
+spirv.func @bitcount_scalar(%arg0: i16) "None" {
   // CHECK: "llvm.intr.ctpop"(%{{.*}}) : (i16) -> i16
-  %0 = spv.BitCount %arg0: i16
-  spv.Return
+  %0 = spirv.BitCount %arg0: i16
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitcount_vector
-spv.func @bitcount_vector(%arg0: vector<3xi32>) "None" {
+spirv.func @bitcount_vector(%arg0: vector<3xi32>) "None" {
   // CHECK: "llvm.intr.ctpop"(%{{.*}}) : (vector<3xi32>) -> vector<3xi32>
-  %0 = spv.BitCount %arg0: vector<3xi32>
-  spv.Return
+  %0 = spirv.BitCount %arg0: vector<3xi32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.BitReverse
+// spirv.BitReverse
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @bitreverse_scalar
-spv.func @bitreverse_scalar(%arg0: i64) "None" {
+spirv.func @bitreverse_scalar(%arg0: i64) "None" {
   // CHECK: "llvm.intr.bitreverse"(%{{.*}}) : (i64) -> i64
-  %0 = spv.BitReverse %arg0: i64
-  spv.Return
+  %0 = spirv.BitReverse %arg0: i64
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitreverse_vector
-spv.func @bitreverse_vector(%arg0: vector<4xi32>) "None" {
+spirv.func @bitreverse_vector(%arg0: vector<4xi32>) "None" {
   // CHECK: "llvm.intr.bitreverse"(%{{.*}}) : (vector<4xi32>) -> vector<4xi32>
-  %0 = spv.BitReverse %arg0: vector<4xi32>
-  spv.Return
+  %0 = spirv.BitReverse %arg0: vector<4xi32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.BitFieldInsert
+// spirv.BitFieldInsert
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @bitfield_insert_scalar_same_bit_width
 //  CHECK-SAME: %[[BASE:.*]]: i32, %[[INSERT:.*]]: i32, %[[OFFSET:.*]]: i32, %[[COUNT:.*]]: i32
-spv.func @bitfield_insert_scalar_same_bit_width(%base: i32, %insert: i32, %offset: i32, %count: i32) "None" {
+spirv.func @bitfield_insert_scalar_same_bit_width(%base: i32, %insert: i32, %offset: i32, %count: i32) "None" {
   // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : i32
   // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : i32
   // CHECK: %[[T1:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : i32
@@ -51,13 +51,13 @@ spv.func @bitfield_insert_scalar_same_bit_width(%base: i32, %insert: i32, %offse
   // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : i32
   // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[OFFSET]] : i32
   // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : i32
-  %0 = spv.BitFieldInsert %base, %insert, %offset, %count : i32, i32, i32
-  spv.Return
+  %0 = spirv.BitFieldInsert %base, %insert, %offset, %count : i32, i32, i32
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitfield_insert_scalar_smaller_bit_width
 //  CHECK-SAME: %[[BASE:.*]]: i64, %[[INSERT:.*]]: i64, %[[OFFSET:.*]]: i8, %[[COUNT:.*]]: i8
-spv.func @bitfield_insert_scalar_smaller_bit_width(%base: i64, %insert: i64, %offset: i8, %count: i8) "None" {
+spirv.func @bitfield_insert_scalar_smaller_bit_width(%base: i64, %insert: i64, %offset: i8, %count: i8) "None" {
   // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : i8 to i64
   // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : i8 to i64
   // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i64) : i64
@@ -68,13 +68,13 @@ spv.func @bitfield_insert_scalar_smaller_bit_width(%base: i64, %insert: i64, %of
   // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : i64
   // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[EXT_OFFSET]] : i64
   // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : i64
-  %0 = spv.BitFieldInsert %base, %insert, %offset, %count : i64, i8, i8
-  spv.Return
+  %0 = spirv.BitFieldInsert %base, %insert, %offset, %count : i64, i8, i8
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitfield_insert_scalar_greater_bit_width
 //  CHECK-SAME: %[[BASE:.*]]: i16, %[[INSERT:.*]]: i16, %[[OFFSET:.*]]: i32, %[[COUNT:.*]]: i64
-spv.func @bitfield_insert_scalar_greater_bit_width(%base: i16, %insert: i16, %offset: i32, %count: i64) "None" {
+spirv.func @bitfield_insert_scalar_greater_bit_width(%base: i16, %insert: i16, %offset: i32, %count: i64) "None" {
   // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : i32 to i16
   // CHECK: %[[TRUNC_COUNT:.*]] = llvm.trunc %[[COUNT]] : i64 to i16
   // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i16) : i16
@@ -85,13 +85,13 @@ spv.func @bitfield_insert_scalar_greater_bit_width(%base: i16, %insert: i16, %of
   // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : i16
   // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[TRUNC_OFFSET]] : i16
   // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : i16
-  %0 = spv.BitFieldInsert %base, %insert, %offset, %count : i16, i32, i64
-  spv.Return
+  %0 = spirv.BitFieldInsert %base, %insert, %offset, %count : i16, i32, i64
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitfield_insert_vector
 //  CHECK-SAME: %[[BASE:.*]]: vector<2xi32>, %[[INSERT:.*]]: vector<2xi32>, %[[OFFSET:.*]]: i32, %[[COUNT:.*]]: i32
-spv.func @bitfield_insert_vector(%base: vector<2xi32>, %insert: vector<2xi32>, %offset: i32, %count: i32) "None" {
+spirv.func @bitfield_insert_vector(%base: vector<2xi32>, %insert: vector<2xi32>, %offset: i32, %count: i32) "None" {
   // CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : vector<2xi32>
   // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
   // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : i32] : vector<2xi32>
@@ -110,30 +110,30 @@ spv.func @bitfield_insert_vector(%base: vector<2xi32>, %insert: vector<2xi32>, %
   // CHECK: %[[NEW_BASE:.*]] = llvm.and %[[BASE]], %[[MASK]] : vector<2xi32>
   // CHECK: %[[SHIFTED_INSERT:.*]] = llvm.shl %[[INSERT]], %[[OFFSET_V2]] : vector<2xi32>
   // CHECK: llvm.or %[[NEW_BASE]], %[[SHIFTED_INSERT]] : vector<2xi32>
-  %0 = spv.BitFieldInsert %base, %insert, %offset, %count : vector<2xi32>, i32, i32
-  spv.Return
+  %0 = spirv.BitFieldInsert %base, %insert, %offset, %count : vector<2xi32>, i32, i32
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.BitFieldSExtract
+// spirv.BitFieldSExtract
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @bitfield_sextract_scalar_same_bit_width
 //  CHECK-SAME: %[[BASE:.*]]: i64, %[[OFFSET:.*]]: i64, %[[COUNT:.*]]: i64
-spv.func @bitfield_sextract_scalar_same_bit_width(%base: i64, %offset: i64, %count: i64) "None" {
+spirv.func @bitfield_sextract_scalar_same_bit_width(%base: i64, %offset: i64, %count: i64) "None" {
   // CHECK: %[[SIZE:.]] = llvm.mlir.constant(64 : i64) : i64
   // CHECK: %[[T0:.*]] = llvm.add %[[COUNT]], %[[OFFSET]] : i64
   // CHECK: %[[T1:.*]] = llvm.sub %[[SIZE]], %[[T0]] : i64
   // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : i64
   // CHECK: %[[T2:.*]] = llvm.add %[[OFFSET]], %[[T1]] : i64
   // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : i64
-  %0 = spv.BitFieldSExtract %base, %offset, %count : i64, i64, i64
-  spv.Return
+  %0 = spirv.BitFieldSExtract %base, %offset, %count : i64, i64, i64
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitfield_sextract_scalar_smaller_bit_width
 //  CHECK-SAME: %[[BASE:.*]]: i32, %[[OFFSET:.*]]: i8, %[[COUNT:.*]]: i8
-spv.func @bitfield_sextract_scalar_smaller_bit_width(%base: i32, %offset: i8, %count: i8) "None" {
+spirv.func @bitfield_sextract_scalar_smaller_bit_width(%base: i32, %offset: i8, %count: i8) "None" {
   // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : i8 to i32
   // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : i8 to i32
   // CHECK: %[[SIZE:.]] = llvm.mlir.constant(32 : i32) : i32
@@ -142,13 +142,13 @@ spv.func @bitfield_sextract_scalar_smaller_bit_width(%base: i32, %offset: i8, %c
   // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : i32
   // CHECK: %[[T2:.*]] = llvm.add %[[EXT_OFFSET]], %[[T1]] : i32
   // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : i32
-  %0 = spv.BitFieldSExtract %base, %offset, %count : i32, i8, i8
-  spv.Return
+  %0 = spirv.BitFieldSExtract %base, %offset, %count : i32, i8, i8
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitfield_sextract_scalar_greater_bit_width
 //  CHECK-SAME: %[[BASE:.*]]: i32, %[[OFFSET:.*]]: i64, %[[COUNT:.*]]: i64
-spv.func @bitfield_sextract_scalar_greater_bit_width(%base: i32, %offset: i64, %count: i64) "None" {
+spirv.func @bitfield_sextract_scalar_greater_bit_width(%base: i32, %offset: i64, %count: i64) "None" {
   // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : i64 to i32
   // CHECK: %[[TRUNC_COUNT:.*]] = llvm.trunc %[[COUNT]] : i64 to i32
   // CHECK: %[[SIZE:.]] = llvm.mlir.constant(32 : i32) : i32
@@ -157,13 +157,13 @@ spv.func @bitfield_sextract_scalar_greater_bit_width(%base: i32, %offset: i64, %
   // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : i32
   // CHECK: %[[T2:.*]] = llvm.add %[[TRUNC_OFFSET]], %[[T1]] : i32
   // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : i32
-  %0 = spv.BitFieldSExtract %base, %offset, %count : i32, i64, i64
-  spv.Return
+  %0 = spirv.BitFieldSExtract %base, %offset, %count : i32, i64, i64
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitfield_sextract_vector
 //  CHECK-SAME: %[[BASE:.*]]: vector<2xi32>, %[[OFFSET:.*]]: i32, %[[COUNT:.*]]: i32
-spv.func @bitfield_sextract_vector(%base: vector<2xi32>, %offset: i32, %count: i32) "None" {
+spirv.func @bitfield_sextract_vector(%base: vector<2xi32>, %offset: i32, %count: i32) "None" {
   // CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : vector<2xi32>
   // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
   // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : i32] : vector<2xi32>
@@ -180,29 +180,29 @@ spv.func @bitfield_sextract_vector(%base: vector<2xi32>, %offset: i32, %count: i
   // CHECK: %[[SHIFTED_LEFT:.*]] = llvm.shl %[[BASE]], %[[T1]] : vector<2xi32>
   // CHECK: %[[T2:.*]] = llvm.add %[[OFFSET_V2]], %[[T1]] : vector<2xi32>
   // CHECK: llvm.ashr %[[SHIFTED_LEFT]], %[[T2]] : vector<2xi32>
-  %0 = spv.BitFieldSExtract %base, %offset, %count : vector<2xi32>, i32, i32
-  spv.Return
+  %0 = spirv.BitFieldSExtract %base, %offset, %count : vector<2xi32>, i32, i32
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.BitFieldUExtract
+// spirv.BitFieldUExtract
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @bitfield_uextract_scalar_same_bit_width
 //  CHECK-SAME: %[[BASE:.*]]: i32, %[[OFFSET:.*]]: i32, %[[COUNT:.*]]: i32
-spv.func @bitfield_uextract_scalar_same_bit_width(%base: i32, %offset: i32, %count: i32) "None" {
+spirv.func @bitfield_uextract_scalar_same_bit_width(%base: i32, %offset: i32, %count: i32) "None" {
   // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : i32
   // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : i32
   // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : i32
   // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[OFFSET]] : i32
   // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : i32
-  %0 = spv.BitFieldUExtract %base, %offset, %count : i32, i32, i32
-  spv.Return
+  %0 = spirv.BitFieldUExtract %base, %offset, %count : i32, i32, i32
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitfield_uextract_scalar_smaller_bit_width
 //  CHECK-SAME: %[[BASE:.*]]: i32, %[[OFFSET:.*]]: i16, %[[COUNT:.*]]: i8
-spv.func @bitfield_uextract_scalar_smaller_bit_width(%base: i32, %offset: i16, %count: i8) "None" {
+spirv.func @bitfield_uextract_scalar_smaller_bit_width(%base: i32, %offset: i16, %count: i8) "None" {
   // CHECK: %[[EXT_OFFSET:.*]] = llvm.zext %[[OFFSET]] : i16 to i32
   // CHECK: %[[EXT_COUNT:.*]] = llvm.zext %[[COUNT]] : i8 to i32
   // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i32) : i32
@@ -210,26 +210,26 @@ spv.func @bitfield_uextract_scalar_smaller_bit_width(%base: i32, %offset: i16, %
   // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : i32
   // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[EXT_OFFSET]] : i32
   // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : i32
-  %0 = spv.BitFieldUExtract %base, %offset, %count : i32, i16, i8
-  spv.Return
+  %0 = spirv.BitFieldUExtract %base, %offset, %count : i32, i16, i8
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitfield_uextract_scalar_greater_bit_width
 //  CHECK-SAME: %[[BASE:.*]]: i8, %[[OFFSET:.*]]: i16, %[[COUNT:.*]]: i8
-spv.func @bitfield_uextract_scalar_greater_bit_width(%base: i8, %offset: i16, %count: i8) "None" {
+spirv.func @bitfield_uextract_scalar_greater_bit_width(%base: i8, %offset: i16, %count: i8) "None" {
   // CHECK: %[[TRUNC_OFFSET:.*]] = llvm.trunc %[[OFFSET]] : i16 to i8
   // CHECK: %[[MINUS_ONE:.*]] = llvm.mlir.constant(-1 : i8) : i8
   // CHECK: %[[T0:.*]] = llvm.shl %[[MINUS_ONE]], %[[COUNT]] : i8
   // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : i8
   // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[TRUNC_OFFSET]] : i8
   // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : i8
-  %0 = spv.BitFieldUExtract %base, %offset, %count : i8, i16, i8
-  spv.Return
+  %0 = spirv.BitFieldUExtract %base, %offset, %count : i8, i16, i8
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitfield_uextract_vector
 //  CHECK-SAME: %[[BASE:.*]]: vector<2xi32>, %[[OFFSET:.*]]: i32, %[[COUNT:.*]]: i32
-spv.func @bitfield_uextract_vector(%base: vector<2xi32>, %offset: i32, %count: i32) "None" {
+spirv.func @bitfield_uextract_vector(%base: vector<2xi32>, %offset: i32, %count: i32) "None" {
   // CHECK: %[[OFFSET_V0:.*]] = llvm.mlir.undef : vector<2xi32>
   // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
   // CHECK: %[[OFFSET_V1:.*]] = llvm.insertelement %[[OFFSET]], %[[OFFSET_V0]][%[[ZERO]] : i32] : vector<2xi32>
@@ -245,80 +245,80 @@ spv.func @bitfield_uextract_vector(%base: vector<2xi32>, %offset: i32, %count: i
   // CHECK: %[[MASK:.*]] = llvm.xor %[[T0]], %[[MINUS_ONE]] : vector<2xi32>
   // CHECK: %[[SHIFTED_BASE:.*]] = llvm.lshr %[[BASE]], %[[OFFSET_V2]] : vector<2xi32>
   // CHECK: llvm.and %[[SHIFTED_BASE]], %[[MASK]] : vector<2xi32>
-  %0 = spv.BitFieldUExtract %base, %offset, %count : vector<2xi32>, i32, i32
-  spv.Return
+  %0 = spirv.BitFieldUExtract %base, %offset, %count : vector<2xi32>, i32, i32
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.BitwiseAnd
+// spirv.BitwiseAnd
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @bitwise_and_scalar
-spv.func @bitwise_and_scalar(%arg0: i32, %arg1: i32) "None" {
+spirv.func @bitwise_and_scalar(%arg0: i32, %arg1: i32) "None" {
   // CHECK: llvm.and %{{.*}}, %{{.*}} : i32
-  %0 = spv.BitwiseAnd %arg0, %arg1 : i32
-  spv.Return
+  %0 = spirv.BitwiseAnd %arg0, %arg1 : i32
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitwise_and_vector
-spv.func @bitwise_and_vector(%arg0: vector<4xi64>, %arg1: vector<4xi64>) "None" {
+spirv.func @bitwise_and_vector(%arg0: vector<4xi64>, %arg1: vector<4xi64>) "None" {
   // CHECK: llvm.and %{{.*}}, %{{.*}} : vector<4xi64>
-  %0 = spv.BitwiseAnd %arg0, %arg1 : vector<4xi64>
-  spv.Return
+  %0 = spirv.BitwiseAnd %arg0, %arg1 : vector<4xi64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.BitwiseOr
+// spirv.BitwiseOr
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @bitwise_or_scalar
-spv.func @bitwise_or_scalar(%arg0: i64, %arg1: i64) "None" {
+spirv.func @bitwise_or_scalar(%arg0: i64, %arg1: i64) "None" {
   // CHECK: llvm.or %{{.*}}, %{{.*}} : i64
-  %0 = spv.BitwiseOr %arg0, %arg1 : i64
-  spv.Return
+  %0 = spirv.BitwiseOr %arg0, %arg1 : i64
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitwise_or_vector
-spv.func @bitwise_or_vector(%arg0: vector<3xi8>, %arg1: vector<3xi8>) "None" {
+spirv.func @bitwise_or_vector(%arg0: vector<3xi8>, %arg1: vector<3xi8>) "None" {
   // CHECK: llvm.or %{{.*}}, %{{.*}} : vector<3xi8>
-  %0 = spv.BitwiseOr %arg0, %arg1 : vector<3xi8>
-  spv.Return
+  %0 = spirv.BitwiseOr %arg0, %arg1 : vector<3xi8>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.BitwiseXor
+// spirv.BitwiseXor
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @bitwise_xor_scalar
-spv.func @bitwise_xor_scalar(%arg0: i32, %arg1: i32) "None" {
+spirv.func @bitwise_xor_scalar(%arg0: i32, %arg1: i32) "None" {
   // CHECK: llvm.xor %{{.*}}, %{{.*}} : i32
-  %0 = spv.BitwiseXor %arg0, %arg1 : i32
-  spv.Return
+  %0 = spirv.BitwiseXor %arg0, %arg1 : i32
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitwise_xor_vector
-spv.func @bitwise_xor_vector(%arg0: vector<2xi16>, %arg1: vector<2xi16>) "None" {
+spirv.func @bitwise_xor_vector(%arg0: vector<2xi16>, %arg1: vector<2xi16>) "None" {
   // CHECK: llvm.xor %{{.*}}, %{{.*}} : vector<2xi16>
-  %0 = spv.BitwiseXor %arg0, %arg1 : vector<2xi16>
-  spv.Return
+  %0 = spirv.BitwiseXor %arg0, %arg1 : vector<2xi16>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.Not
+// spirv.Not
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @not_scalar
-spv.func @not_scalar(%arg0: i32) "None" {
+spirv.func @not_scalar(%arg0: i32) "None" {
   // CHECK: %[[CONST:.*]] = llvm.mlir.constant(-1 : i32) : i32
   // CHECK: llvm.xor %{{.*}}, %[[CONST]] : i32
-  %0 = spv.Not %arg0 : i32
-  spv.Return
+  %0 = spirv.Not %arg0 : i32
+  spirv.Return
 }
 
 // CHECK-LABEL: @not_vector
-spv.func @not_vector(%arg0: vector<2xi16>) "None" {
+spirv.func @not_vector(%arg0: vector<2xi16>) "None" {
   // CHECK: %[[CONST:.*]] = llvm.mlir.constant(dense<-1> : vector<2xi16>) : vector<2xi16>
   // CHECK: llvm.xor %{{.*}}, %[[CONST]] : vector<2xi16>
-  %0 = spv.Not %arg0 : vector<2xi16>
-  spv.Return
+  %0 = spirv.Not %arg0 : vector<2xi16>
+  spirv.Return
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir
index bfabc8c7d0c6..175274cf3534 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/cast-ops-to-llvm.mlir
@@ -1,191 +1,191 @@
 // RUN: mlir-opt -convert-spirv-to-llvm %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.Bitcast
+// spirv.Bitcast
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @bitcast_float_to_integer_scalar
-spv.func @bitcast_float_to_integer_scalar(%arg0 : f32) "None" {
+spirv.func @bitcast_float_to_integer_scalar(%arg0 : f32) "None" {
   // CHECK: llvm.bitcast {{.*}} : f32 to i32
-  %0 = spv.Bitcast %arg0: f32 to i32
-  spv.Return
+  %0 = spirv.Bitcast %arg0: f32 to i32
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitcast_float_to_integer_vector
-spv.func @bitcast_float_to_integer_vector(%arg0 : vector<3xf32>) "None" {
+spirv.func @bitcast_float_to_integer_vector(%arg0 : vector<3xf32>) "None" {
   // CHECK: {{.*}} = llvm.bitcast {{.*}} : vector<3xf32> to vector<3xi32>
-  %0 = spv.Bitcast %arg0: vector<3xf32> to vector<3xi32>
-  spv.Return
+  %0 = spirv.Bitcast %arg0: vector<3xf32> to vector<3xi32>
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitcast_vector_to_scalar
-spv.func @bitcast_vector_to_scalar(%arg0 : vector<2xf32>) "None" {
+spirv.func @bitcast_vector_to_scalar(%arg0 : vector<2xf32>) "None" {
   // CHECK: {{.*}} = llvm.bitcast {{.*}} : vector<2xf32> to i64
-  %0 = spv.Bitcast %arg0: vector<2xf32> to i64
-  spv.Return
+  %0 = spirv.Bitcast %arg0: vector<2xf32> to i64
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitcast_scalar_to_vector
-spv.func @bitcast_scalar_to_vector(%arg0 : f64) "None" {
+spirv.func @bitcast_scalar_to_vector(%arg0 : f64) "None" {
   // CHECK: {{.*}} = llvm.bitcast {{.*}} : f64 to vector<2xi32>
-  %0 = spv.Bitcast %arg0: f64 to vector<2xi32>
-  spv.Return
+  %0 = spirv.Bitcast %arg0: f64 to vector<2xi32>
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitcast_vector_to_vector
-spv.func @bitcast_vector_to_vector(%arg0 : vector<4xf32>) "None" {
+spirv.func @bitcast_vector_to_vector(%arg0 : vector<4xf32>) "None" {
   // CHECK: {{.*}} = llvm.bitcast {{.*}} : vector<4xf32> to vector<2xi64>
-  %0 = spv.Bitcast %arg0: vector<4xf32> to vector<2xi64>
-  spv.Return
+  %0 = spirv.Bitcast %arg0: vector<4xf32> to vector<2xi64>
+  spirv.Return
 }
 
 // CHECK-LABEL: @bitcast_pointer
-spv.func @bitcast_pointer(%arg0: !spv.ptr<f32, Function>) "None" {
+spirv.func @bitcast_pointer(%arg0: !spirv.ptr<f32, Function>) "None" {
   // CHECK: llvm.bitcast %{{.*}} : !llvm.ptr<f32> to !llvm.ptr<i32>
-  %0 = spv.Bitcast %arg0 : !spv.ptr<f32, Function> to !spv.ptr<i32, Function>
-  spv.Return
+  %0 = spirv.Bitcast %arg0 : !spirv.ptr<f32, Function> to !spirv.ptr<i32, Function>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ConvertFToS
+// spirv.ConvertFToS
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @convert_float_to_signed_scalar
-spv.func @convert_float_to_signed_scalar(%arg0: f32) "None" {
+spirv.func @convert_float_to_signed_scalar(%arg0: f32) "None" {
   // CHECK: llvm.fptosi %{{.*}} : f32 to i32
-  %0 = spv.ConvertFToS %arg0: f32 to i32
-  spv.Return
+  %0 = spirv.ConvertFToS %arg0: f32 to i32
+  spirv.Return
 }
 
 // CHECK-LABEL: @convert_float_to_signed_vector
-spv.func @convert_float_to_signed_vector(%arg0: vector<2xf32>) "None" {
+spirv.func @convert_float_to_signed_vector(%arg0: vector<2xf32>) "None" {
   // CHECK: llvm.fptosi %{{.*}} : vector<2xf32> to vector<2xi32>
-    %0 = spv.ConvertFToS %arg0: vector<2xf32> to vector<2xi32>
-  spv.Return
+    %0 = spirv.ConvertFToS %arg0: vector<2xf32> to vector<2xi32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ConvertFToU
+// spirv.ConvertFToU
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @convert_float_to_unsigned_scalar
-spv.func @convert_float_to_unsigned_scalar(%arg0: f32) "None" {
+spirv.func @convert_float_to_unsigned_scalar(%arg0: f32) "None" {
   // CHECK: llvm.fptoui %{{.*}} : f32 to i32
-  %0 = spv.ConvertFToU %arg0: f32 to i32
-  spv.Return
+  %0 = spirv.ConvertFToU %arg0: f32 to i32
+  spirv.Return
 }
 
 // CHECK-LABEL: @convert_float_to_unsigned_vector
-spv.func @convert_float_to_unsigned_vector(%arg0: vector<2xf32>) "None" {
+spirv.func @convert_float_to_unsigned_vector(%arg0: vector<2xf32>) "None" {
   // CHECK: llvm.fptoui %{{.*}} : vector<2xf32> to vector<2xi32>
-    %0 = spv.ConvertFToU %arg0: vector<2xf32> to vector<2xi32>
-  spv.Return
+    %0 = spirv.ConvertFToU %arg0: vector<2xf32> to vector<2xi32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ConvertSToF
+// spirv.ConvertSToF
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @convert_signed_to_float_scalar
-spv.func @convert_signed_to_float_scalar(%arg0: i32) "None" {
+spirv.func @convert_signed_to_float_scalar(%arg0: i32) "None" {
   // CHECK: llvm.sitofp %{{.*}} : i32 to f32
-  %0 = spv.ConvertSToF %arg0: i32 to f32
-  spv.Return
+  %0 = spirv.ConvertSToF %arg0: i32 to f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @convert_signed_to_float_vector
-spv.func @convert_signed_to_float_vector(%arg0: vector<3xi32>) "None" {
+spirv.func @convert_signed_to_float_vector(%arg0: vector<3xi32>) "None" {
   // CHECK: llvm.sitofp %{{.*}} : vector<3xi32> to vector<3xf32>
-    %0 = spv.ConvertSToF %arg0: vector<3xi32> to vector<3xf32>
-  spv.Return
+    %0 = spirv.ConvertSToF %arg0: vector<3xi32> to vector<3xf32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ConvertUToF
+// spirv.ConvertUToF
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @convert_unsigned_to_float_scalar
-spv.func @convert_unsigned_to_float_scalar(%arg0: i32) "None" {
+spirv.func @convert_unsigned_to_float_scalar(%arg0: i32) "None" {
   // CHECK: llvm.uitofp %{{.*}} : i32 to f32
-  %0 = spv.ConvertUToF %arg0: i32 to f32
-  spv.Return
+  %0 = spirv.ConvertUToF %arg0: i32 to f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @convert_unsigned_to_float_vector
-spv.func @convert_unsigned_to_float_vector(%arg0: vector<3xi32>) "None" {
+spirv.func @convert_unsigned_to_float_vector(%arg0: vector<3xi32>) "None" {
   // CHECK: llvm.uitofp %{{.*}} : vector<3xi32> to vector<3xf32>
-    %0 = spv.ConvertUToF %arg0: vector<3xi32> to vector<3xf32>
-  spv.Return
+    %0 = spirv.ConvertUToF %arg0: vector<3xi32> to vector<3xf32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FConvert
+// spirv.FConvert
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @fconvert_scalar
-spv.func @fconvert_scalar(%arg0: f32, %arg1: f64) "None" {
+spirv.func @fconvert_scalar(%arg0: f32, %arg1: f64) "None" {
   // CHECK: llvm.fpext %{{.*}} : f32 to f64
-  %0 = spv.FConvert %arg0: f32 to f64
+  %0 = spirv.FConvert %arg0: f32 to f64
 
   // CHECK: llvm.fptrunc %{{.*}} : f64 to f32
-  %1 = spv.FConvert %arg1: f64 to f32
-  spv.Return
+  %1 = spirv.FConvert %arg1: f64 to f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @fconvert_vector
-spv.func @fconvert_vector(%arg0: vector<2xf32>, %arg1: vector<2xf64>) "None" {
+spirv.func @fconvert_vector(%arg0: vector<2xf32>, %arg1: vector<2xf64>) "None" {
   // CHECK: llvm.fpext %{{.*}} : vector<2xf32> to vector<2xf64>
-  %0 = spv.FConvert %arg0: vector<2xf32> to vector<2xf64>
+  %0 = spirv.FConvert %arg0: vector<2xf32> to vector<2xf64>
 
   // CHECK: llvm.fptrunc %{{.*}} : vector<2xf64> to vector<2xf32>
-  %1 = spv.FConvert %arg1: vector<2xf64> to vector<2xf32>
-  spv.Return
+  %1 = spirv.FConvert %arg1: vector<2xf64> to vector<2xf32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.SConvert
+// spirv.SConvert
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @sconvert_scalar
-spv.func @sconvert_scalar(%arg0: i32, %arg1: i64) "None" {
+spirv.func @sconvert_scalar(%arg0: i32, %arg1: i64) "None" {
   // CHECK: llvm.sext %{{.*}} : i32 to i64
-  %0 = spv.SConvert %arg0: i32 to i64
+  %0 = spirv.SConvert %arg0: i32 to i64
 
   // CHECK: llvm.trunc %{{.*}} : i64 to i32
-  %1 = spv.SConvert %arg1: i64 to i32
-  spv.Return
+  %1 = spirv.SConvert %arg1: i64 to i32
+  spirv.Return
 }
 
 // CHECK-LABEL: @sconvert_vector
-spv.func @sconvert_vector(%arg0: vector<3xi32>, %arg1: vector<3xi64>) "None" {
+spirv.func @sconvert_vector(%arg0: vector<3xi32>, %arg1: vector<3xi64>) "None" {
   // CHECK: llvm.sext %{{.*}} : vector<3xi32> to vector<3xi64>
-  %0 = spv.SConvert %arg0: vector<3xi32> to vector<3xi64>
+  %0 = spirv.SConvert %arg0: vector<3xi32> to vector<3xi64>
 
   // CHECK: llvm.trunc %{{.*}} : vector<3xi64> to vector<3xi32>
-  %1 = spv.SConvert %arg1: vector<3xi64> to vector<3xi32>
-  spv.Return
+  %1 = spirv.SConvert %arg1: vector<3xi64> to vector<3xi32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.UConvert
+// spirv.UConvert
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @uconvert_scalar
-spv.func @uconvert_scalar(%arg0: i32, %arg1: i64) "None" {
+spirv.func @uconvert_scalar(%arg0: i32, %arg1: i64) "None" {
   // CHECK: llvm.zext %{{.*}} : i32 to i64
-  %0 = spv.UConvert %arg0: i32 to i64
+  %0 = spirv.UConvert %arg0: i32 to i64
 
   // CHECK: llvm.trunc %{{.*}} : i64 to i32
-  %1 = spv.UConvert %arg1: i64 to i32
-  spv.Return
+  %1 = spirv.UConvert %arg1: i64 to i32
+  spirv.Return
 }
 
 // CHECK-LABEL: @uconvert_vector
-spv.func @uconvert_vector(%arg0: vector<3xi32>, %arg1: vector<3xi64>) "None" {
+spirv.func @uconvert_vector(%arg0: vector<3xi32>, %arg1: vector<3xi64>) "None" {
   // CHECK: llvm.zext %{{.*}} : vector<3xi32> to vector<3xi64>
-  %0 = spv.UConvert %arg0: vector<3xi32> to vector<3xi64>
+  %0 = spirv.UConvert %arg0: vector<3xi32> to vector<3xi64>
 
   // CHECK: llvm.trunc %{{.*}} : vector<3xi64> to vector<3xi32>
-  %1 = spv.UConvert %arg1: vector<3xi64> to vector<3xi32>
-  spv.Return
+  %1 = spirv.UConvert %arg1: vector<3xi64> to vector<3xi32>
+  spirv.Return
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir
index 355e313d9f0c..52359db3be7b 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/comparison-ops-to-llvm.mlir
@@ -1,397 +1,397 @@
 // RUN: mlir-opt -convert-spirv-to-llvm %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.IEqual
+// spirv.IEqual
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @i_equal_scalar
-spv.func @i_equal_scalar(%arg0: i32, %arg1: i32) "None" {
+spirv.func @i_equal_scalar(%arg0: i32, %arg1: i32) "None" {
   // CHECK: llvm.icmp "eq" %{{.*}}, %{{.*}} : i32
-  %0 = spv.IEqual %arg0, %arg1 : i32
-  spv.Return
+  %0 = spirv.IEqual %arg0, %arg1 : i32
+  spirv.Return
 }
 
 // CHECK-LABEL: @i_equal_vector
-spv.func @i_equal_vector(%arg0: vector<4xi64>, %arg1: vector<4xi64>) "None" {
+spirv.func @i_equal_vector(%arg0: vector<4xi64>, %arg1: vector<4xi64>) "None" {
   // CHECK: llvm.icmp "eq" %{{.*}}, %{{.*}} : vector<4xi64>
-  %0 = spv.IEqual %arg0, %arg1 : vector<4xi64>
-  spv.Return
+  %0 = spirv.IEqual %arg0, %arg1 : vector<4xi64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.INotEqual
+// spirv.INotEqual
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @i_not_equal_scalar
-spv.func @i_not_equal_scalar(%arg0: i64, %arg1: i64) "None" {
+spirv.func @i_not_equal_scalar(%arg0: i64, %arg1: i64) "None" {
   // CHECK: llvm.icmp "ne" %{{.*}}, %{{.*}} : i64
-  %0 = spv.INotEqual %arg0, %arg1 : i64
-  spv.Return
+  %0 = spirv.INotEqual %arg0, %arg1 : i64
+  spirv.Return
 }
 
 // CHECK-LABEL: @i_not_equal_vector
-spv.func @i_not_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
+spirv.func @i_not_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
   // CHECK: llvm.icmp "ne" %{{.*}}, %{{.*}} : vector<2xi64>
-  %0 = spv.INotEqual %arg0, %arg1 : vector<2xi64>
-  spv.Return
+  %0 = spirv.INotEqual %arg0, %arg1 : vector<2xi64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.SGreaterThanEqual
+// spirv.SGreaterThanEqual
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @s_greater_than_equal_scalar
-spv.func @s_greater_than_equal_scalar(%arg0: i64, %arg1: i64) "None" {
+spirv.func @s_greater_than_equal_scalar(%arg0: i64, %arg1: i64) "None" {
   // CHECK: llvm.icmp "sge" %{{.*}}, %{{.*}} : i64
-  %0 = spv.SGreaterThanEqual %arg0, %arg1 : i64
-  spv.Return
+  %0 = spirv.SGreaterThanEqual %arg0, %arg1 : i64
+  spirv.Return
 }
 
 // CHECK-LABEL: @s_greater_than_equal_vector
-spv.func @s_greater_than_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
+spirv.func @s_greater_than_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
   // CHECK: llvm.icmp "sge" %{{.*}}, %{{.*}} : vector<2xi64>
-  %0 = spv.SGreaterThanEqual %arg0, %arg1 : vector<2xi64>
-  spv.Return
+  %0 = spirv.SGreaterThanEqual %arg0, %arg1 : vector<2xi64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.SGreaterThan
+// spirv.SGreaterThan
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @s_greater_than_scalar
-spv.func @s_greater_than_scalar(%arg0: i64, %arg1: i64) "None" {
+spirv.func @s_greater_than_scalar(%arg0: i64, %arg1: i64) "None" {
   // CHECK: llvm.icmp "sgt" %{{.*}}, %{{.*}} : i64
-  %0 = spv.SGreaterThan %arg0, %arg1 : i64
-  spv.Return
+  %0 = spirv.SGreaterThan %arg0, %arg1 : i64
+  spirv.Return
 }
 
 // CHECK-LABEL: @s_greater_than_vector
-spv.func @s_greater_than_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
+spirv.func @s_greater_than_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
   // CHECK: llvm.icmp "sgt" %{{.*}}, %{{.*}} : vector<2xi64>
-  %0 = spv.SGreaterThan %arg0, %arg1 : vector<2xi64>
-  spv.Return
+  %0 = spirv.SGreaterThan %arg0, %arg1 : vector<2xi64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.SLessThanEqual
+// spirv.SLessThanEqual
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @s_less_than_equal_scalar
-spv.func @s_less_than_equal_scalar(%arg0: i64, %arg1: i64) "None" {
+spirv.func @s_less_than_equal_scalar(%arg0: i64, %arg1: i64) "None" {
   // CHECK: llvm.icmp "sle" %{{.*}}, %{{.*}} : i64
-  %0 = spv.SLessThanEqual %arg0, %arg1 : i64
-  spv.Return
+  %0 = spirv.SLessThanEqual %arg0, %arg1 : i64
+  spirv.Return
 }
 
 // CHECK-LABEL: @s_less_than_equal_vector
-spv.func @s_less_than_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
+spirv.func @s_less_than_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
   // CHECK: llvm.icmp "sle" %{{.*}}, %{{.*}} : vector<2xi64>
-  %0 = spv.SLessThanEqual %arg0, %arg1 : vector<2xi64>
-  spv.Return
+  %0 = spirv.SLessThanEqual %arg0, %arg1 : vector<2xi64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.SLessThan
+// spirv.SLessThan
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @s_less_than_scalar
-spv.func @s_less_than_scalar(%arg0: i64, %arg1: i64) "None" {
+spirv.func @s_less_than_scalar(%arg0: i64, %arg1: i64) "None" {
   // CHECK: llvm.icmp "slt" %{{.*}}, %{{.*}} : i64
-  %0 = spv.SLessThan %arg0, %arg1 : i64
-  spv.Return
+  %0 = spirv.SLessThan %arg0, %arg1 : i64
+  spirv.Return
 }
 
 // CHECK-LABEL: @s_less_than_vector
-spv.func @s_less_than_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
+spirv.func @s_less_than_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
   // CHECK: llvm.icmp "slt" %{{.*}}, %{{.*}} : vector<2xi64>
-  %0 = spv.SLessThan %arg0, %arg1 : vector<2xi64>
-  spv.Return
+  %0 = spirv.SLessThan %arg0, %arg1 : vector<2xi64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.UGreaterThanEqual
+// spirv.UGreaterThanEqual
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @u_greater_than_equal_scalar
-spv.func @u_greater_than_equal_scalar(%arg0: i64, %arg1: i64) "None" {
+spirv.func @u_greater_than_equal_scalar(%arg0: i64, %arg1: i64) "None" {
   // CHECK: llvm.icmp "uge" %{{.*}}, %{{.*}} : i64
-  %0 = spv.UGreaterThanEqual %arg0, %arg1 : i64
-  spv.Return
+  %0 = spirv.UGreaterThanEqual %arg0, %arg1 : i64
+  spirv.Return
 }
 
 // CHECK-LABEL: @u_greater_than_equal_vector
-spv.func @u_greater_than_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
+spirv.func @u_greater_than_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
   // CHECK: llvm.icmp "uge" %{{.*}}, %{{.*}} : vector<2xi64>
-  %0 = spv.UGreaterThanEqual %arg0, %arg1 : vector<2xi64>
-  spv.Return
+  %0 = spirv.UGreaterThanEqual %arg0, %arg1 : vector<2xi64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.UGreaterThan
+// spirv.UGreaterThan
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @u_greater_than_scalar
-spv.func @u_greater_than_scalar(%arg0: i64, %arg1: i64) "None" {
+spirv.func @u_greater_than_scalar(%arg0: i64, %arg1: i64) "None" {
   // CHECK: llvm.icmp "ugt" %{{.*}}, %{{.*}} : i64
-  %0 = spv.UGreaterThan %arg0, %arg1 : i64
-  spv.Return
+  %0 = spirv.UGreaterThan %arg0, %arg1 : i64
+  spirv.Return
 }
 
 // CHECK-LABEL: @u_greater_than_vector
-spv.func @u_greater_than_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
+spirv.func @u_greater_than_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
   // CHECK: llvm.icmp "ugt" %{{.*}}, %{{.*}} : vector<2xi64>
-  %0 = spv.UGreaterThan %arg0, %arg1 : vector<2xi64>
-  spv.Return
+  %0 = spirv.UGreaterThan %arg0, %arg1 : vector<2xi64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ULessThanEqual
+// spirv.ULessThanEqual
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @u_less_than_equal_scalar
-spv.func @u_less_than_equal_scalar(%arg0: i64, %arg1: i64) "None" {
+spirv.func @u_less_than_equal_scalar(%arg0: i64, %arg1: i64) "None" {
   // CHECK: llvm.icmp "ule" %{{.*}}, %{{.*}} : i64
-  %0 = spv.ULessThanEqual %arg0, %arg1 : i64
-  spv.Return
+  %0 = spirv.ULessThanEqual %arg0, %arg1 : i64
+  spirv.Return
 }
 
 // CHECK-LABEL: @u_less_than_equal_vector
-spv.func @u_less_than_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
+spirv.func @u_less_than_equal_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
   // CHECK: llvm.icmp "ule" %{{.*}}, %{{.*}} : vector<2xi64>
-  %0 = spv.ULessThanEqual %arg0, %arg1 : vector<2xi64>
-  spv.Return
+  %0 = spirv.ULessThanEqual %arg0, %arg1 : vector<2xi64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ULessThan
+// spirv.ULessThan
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @u_less_than_scalar
-spv.func @u_less_than_scalar(%arg0: i64, %arg1: i64) "None" {
+spirv.func @u_less_than_scalar(%arg0: i64, %arg1: i64) "None" {
   // CHECK: llvm.icmp "ult" %{{.*}}, %{{.*}} : i64
-  %0 = spv.ULessThan %arg0, %arg1 : i64
-  spv.Return
+  %0 = spirv.ULessThan %arg0, %arg1 : i64
+  spirv.Return
 }
 
 // CHECK-LABEL: @u_less_than_vector
-spv.func @u_less_than_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
+spirv.func @u_less_than_vector(%arg0: vector<2xi64>, %arg1: vector<2xi64>) "None" {
   // CHECK: llvm.icmp "ult" %{{.*}}, %{{.*}} : vector<2xi64>
-  %0 = spv.ULessThan %arg0, %arg1 : vector<2xi64>
-  spv.Return
+  %0 = spirv.ULessThan %arg0, %arg1 : vector<2xi64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FOrdEqual
+// spirv.FOrdEqual
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @f_ord_equal_scalar
-spv.func @f_ord_equal_scalar(%arg0: f32, %arg1: f32) "None" {
+spirv.func @f_ord_equal_scalar(%arg0: f32, %arg1: f32) "None" {
   // CHECK: llvm.fcmp "oeq" %{{.*}}, %{{.*}} : f32
-  %0 = spv.FOrdEqual %arg0, %arg1 : f32
-  spv.Return
+  %0 = spirv.FOrdEqual %arg0, %arg1 : f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @f_ord_equal_vector
-spv.func @f_ord_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) "None" {
+spirv.func @f_ord_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) "None" {
   // CHECK: llvm.fcmp "oeq" %{{.*}}, %{{.*}} : vector<4xf64>
-  %0 = spv.FOrdEqual %arg0, %arg1 : vector<4xf64>
-  spv.Return
+  %0 = spirv.FOrdEqual %arg0, %arg1 : vector<4xf64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FOrdGreaterThanEqual
+// spirv.FOrdGreaterThanEqual
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @f_ord_greater_than_equal_scalar
-spv.func @f_ord_greater_than_equal_scalar(%arg0: f64, %arg1: f64) "None" {
+spirv.func @f_ord_greater_than_equal_scalar(%arg0: f64, %arg1: f64) "None" {
   // CHECK: llvm.fcmp "oge" %{{.*}}, %{{.*}} : f64
-  %0 = spv.FOrdGreaterThanEqual %arg0, %arg1 : f64
-  spv.Return
+  %0 = spirv.FOrdGreaterThanEqual %arg0, %arg1 : f64
+  spirv.Return
 }
 
 // CHECK-LABEL: @f_ord_greater_than_equal_vector
-spv.func @f_ord_greater_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
+spirv.func @f_ord_greater_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
   // CHECK: llvm.fcmp "oge" %{{.*}}, %{{.*}} : vector<2xf64>
-  %0 = spv.FOrdGreaterThanEqual %arg0, %arg1 : vector<2xf64>
-  spv.Return
+  %0 = spirv.FOrdGreaterThanEqual %arg0, %arg1 : vector<2xf64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FOrdGreaterThan
+// spirv.FOrdGreaterThan
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @f_ord_greater_than_scalar
-spv.func @f_ord_greater_than_scalar(%arg0: f64, %arg1: f64) "None" {
+spirv.func @f_ord_greater_than_scalar(%arg0: f64, %arg1: f64) "None" {
   // CHECK: llvm.fcmp "ogt" %{{.*}}, %{{.*}} : f64
-  %0 = spv.FOrdGreaterThan %arg0, %arg1 : f64
-  spv.Return
+  %0 = spirv.FOrdGreaterThan %arg0, %arg1 : f64
+  spirv.Return
 }
 
 // CHECK-LABEL: @f_ord_greater_than_vector
-spv.func @f_ord_greater_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
+spirv.func @f_ord_greater_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
   // CHECK: llvm.fcmp "ogt" %{{.*}}, %{{.*}} : vector<2xf64>
-  %0 = spv.FOrdGreaterThan %arg0, %arg1 : vector<2xf64>
-  spv.Return
+  %0 = spirv.FOrdGreaterThan %arg0, %arg1 : vector<2xf64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FOrdLessThan
+// spirv.FOrdLessThan
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @f_ord_less_than_scalar
-spv.func @f_ord_less_than_scalar(%arg0: f64, %arg1: f64) "None" {
+spirv.func @f_ord_less_than_scalar(%arg0: f64, %arg1: f64) "None" {
   // CHECK: llvm.fcmp "olt" %{{.*}}, %{{.*}} : f64
-  %0 = spv.FOrdLessThan %arg0, %arg1 : f64
-  spv.Return
+  %0 = spirv.FOrdLessThan %arg0, %arg1 : f64
+  spirv.Return
 }
 
 // CHECK-LABEL: @f_ord_less_than_vector
-spv.func @f_ord_less_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
+spirv.func @f_ord_less_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
   // CHECK: llvm.fcmp "olt" %{{.*}}, %{{.*}} : vector<2xf64>
-  %0 = spv.FOrdLessThan %arg0, %arg1 : vector<2xf64>
-  spv.Return
+  %0 = spirv.FOrdLessThan %arg0, %arg1 : vector<2xf64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FOrdLessThanEqual
+// spirv.FOrdLessThanEqual
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @f_ord_less_than_equal_scalar
-spv.func @f_ord_less_than_equal_scalar(%arg0: f64, %arg1: f64) "None" {
+spirv.func @f_ord_less_than_equal_scalar(%arg0: f64, %arg1: f64) "None" {
   // CHECK: llvm.fcmp "ole" %{{.*}}, %{{.*}} : f64
-  %0 = spv.FOrdLessThanEqual %arg0, %arg1 : f64
-  spv.Return
+  %0 = spirv.FOrdLessThanEqual %arg0, %arg1 : f64
+  spirv.Return
 }
 
 // CHECK-LABEL: @f_ord_less_than_equal_vector
-spv.func @f_ord_less_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
+spirv.func @f_ord_less_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
   // CHECK: llvm.fcmp "ole" %{{.*}}, %{{.*}} : vector<2xf64>
-  %0 = spv.FOrdLessThanEqual %arg0, %arg1 : vector<2xf64>
-  spv.Return
+  %0 = spirv.FOrdLessThanEqual %arg0, %arg1 : vector<2xf64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FOrdNotEqual
+// spirv.FOrdNotEqual
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @f_ord_not_equal_scalar
-spv.func @f_ord_not_equal_scalar(%arg0: f32, %arg1: f32) "None" {
+spirv.func @f_ord_not_equal_scalar(%arg0: f32, %arg1: f32) "None" {
   // CHECK: llvm.fcmp "one" %{{.*}}, %{{.*}} : f32
-  %0 = spv.FOrdNotEqual %arg0, %arg1 : f32
-  spv.Return
+  %0 = spirv.FOrdNotEqual %arg0, %arg1 : f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @f_ord_not_equal_vector
-spv.func @f_ord_not_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) "None" {
+spirv.func @f_ord_not_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) "None" {
   // CHECK: llvm.fcmp "one" %{{.*}}, %{{.*}} : vector<4xf64>
-  %0 = spv.FOrdNotEqual %arg0, %arg1 : vector<4xf64>
-  spv.Return
+  %0 = spirv.FOrdNotEqual %arg0, %arg1 : vector<4xf64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FUnordEqual
+// spirv.FUnordEqual
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @f_unord_equal_scalar
-spv.func @f_unord_equal_scalar(%arg0: f32, %arg1: f32) "None" {
+spirv.func @f_unord_equal_scalar(%arg0: f32, %arg1: f32) "None" {
   // CHECK: llvm.fcmp "ueq" %{{.*}}, %{{.*}} : f32
-  %0 = spv.FUnordEqual %arg0, %arg1 : f32
-  spv.Return
+  %0 = spirv.FUnordEqual %arg0, %arg1 : f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @f_unord_equal_vector
-spv.func @f_unord_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) "None" {
+spirv.func @f_unord_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) "None" {
   // CHECK: llvm.fcmp "ueq" %{{.*}}, %{{.*}} : vector<4xf64>
-  %0 = spv.FUnordEqual %arg0, %arg1 : vector<4xf64>
-  spv.Return
+  %0 = spirv.FUnordEqual %arg0, %arg1 : vector<4xf64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FUnordGreaterThanEqual
+// spirv.FUnordGreaterThanEqual
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @f_unord_greater_than_equal_scalar
-spv.func @f_unord_greater_than_equal_scalar(%arg0: f64, %arg1: f64) "None" {
+spirv.func @f_unord_greater_than_equal_scalar(%arg0: f64, %arg1: f64) "None" {
   // CHECK: llvm.fcmp "uge" %{{.*}}, %{{.*}} : f64
-  %0 = spv.FUnordGreaterThanEqual %arg0, %arg1 : f64
-  spv.Return
+  %0 = spirv.FUnordGreaterThanEqual %arg0, %arg1 : f64
+  spirv.Return
 }
 
 // CHECK-LABEL: @f_unord_greater_than_equal_vector
-spv.func @f_unord_greater_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
+spirv.func @f_unord_greater_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
   // CHECK: llvm.fcmp "uge" %{{.*}}, %{{.*}} : vector<2xf64>
-  %0 = spv.FUnordGreaterThanEqual %arg0, %arg1 : vector<2xf64>
-  spv.Return
+  %0 = spirv.FUnordGreaterThanEqual %arg0, %arg1 : vector<2xf64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FUnordGreaterThan
+// spirv.FUnordGreaterThan
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @f_unord_greater_than_scalar
-spv.func @f_unord_greater_than_scalar(%arg0: f64, %arg1: f64) "None" {
+spirv.func @f_unord_greater_than_scalar(%arg0: f64, %arg1: f64) "None" {
   // CHECK: llvm.fcmp "ugt" %{{.*}}, %{{.*}} : f64
-  %0 = spv.FUnordGreaterThan %arg0, %arg1 : f64
-  spv.Return
+  %0 = spirv.FUnordGreaterThan %arg0, %arg1 : f64
+  spirv.Return
 }
 
 // CHECK-LABEL: @f_unord_greater_than_vector
-spv.func @f_unord_greater_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
+spirv.func @f_unord_greater_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
   // CHECK: llvm.fcmp "ugt" %{{.*}}, %{{.*}} : vector<2xf64>
-  %0 = spv.FUnordGreaterThan %arg0, %arg1 : vector<2xf64>
-  spv.Return
+  %0 = spirv.FUnordGreaterThan %arg0, %arg1 : vector<2xf64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FUnordLessThan
+// spirv.FUnordLessThan
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @f_unord_less_than_scalar
-spv.func @f_unord_less_than_scalar(%arg0: f64, %arg1: f64) "None" {
+spirv.func @f_unord_less_than_scalar(%arg0: f64, %arg1: f64) "None" {
   // CHECK: llvm.fcmp "ult" %{{.*}}, %{{.*}} : f64
-  %0 = spv.FUnordLessThan %arg0, %arg1 : f64
-  spv.Return
+  %0 = spirv.FUnordLessThan %arg0, %arg1 : f64
+  spirv.Return
 }
 
 // CHECK-LABEL: @f_unord_less_than_vector
-spv.func @f_unord_less_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
+spirv.func @f_unord_less_than_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
   // CHECK: llvm.fcmp "ult" %{{.*}}, %{{.*}} : vector<2xf64>
-  %0 = spv.FUnordLessThan %arg0, %arg1 : vector<2xf64>
-  spv.Return
+  %0 = spirv.FUnordLessThan %arg0, %arg1 : vector<2xf64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FUnordLessThanEqual
+// spirv.FUnordLessThanEqual
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @f_unord_less_than_equal_scalar
-spv.func @f_unord_less_than_equal_scalar(%arg0: f64, %arg1: f64) "None" {
+spirv.func @f_unord_less_than_equal_scalar(%arg0: f64, %arg1: f64) "None" {
   // CHECK: llvm.fcmp "ule" %{{.*}}, %{{.*}} : f64
-  %0 = spv.FUnordLessThanEqual %arg0, %arg1 : f64
-  spv.Return
+  %0 = spirv.FUnordLessThanEqual %arg0, %arg1 : f64
+  spirv.Return
 }
 
 // CHECK-LABEL: @f_unord_less_than_equal_vector
-spv.func @f_unord_less_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
+spirv.func @f_unord_less_than_equal_vector(%arg0: vector<2xf64>, %arg1: vector<2xf64>) "None" {
   // CHECK: llvm.fcmp "ule" %{{.*}}, %{{.*}} : vector<2xf64>
-  %0 = spv.FUnordLessThanEqual %arg0, %arg1 : vector<2xf64>
-  spv.Return
+  %0 = spirv.FUnordLessThanEqual %arg0, %arg1 : vector<2xf64>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FUnordNotEqual
+// spirv.FUnordNotEqual
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @f_unord_not_equal_scalar
-spv.func @f_unord_not_equal_scalar(%arg0: f32, %arg1: f32) "None" {
+spirv.func @f_unord_not_equal_scalar(%arg0: f32, %arg1: f32) "None" {
   // CHECK: llvm.fcmp "une" %{{.*}}, %{{.*}} : f32
-  %0 = spv.FUnordNotEqual %arg0, %arg1 : f32
-  spv.Return
+  %0 = spirv.FUnordNotEqual %arg0, %arg1 : f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @f_unord_not_equal_vector
-spv.func @f_unord_not_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) "None" {
+spirv.func @f_unord_not_equal_vector(%arg0: vector<4xf64>, %arg1: vector<4xf64>) "None" {
   // CHECK: llvm.fcmp "une" %{{.*}}, %{{.*}} : vector<4xf64>
-  %0 = spv.FUnordNotEqual %arg0, %arg1 : vector<4xf64>
-  spv.Return
+  %0 = spirv.FUnordNotEqual %arg0, %arg1 : vector<4xf64>
+  spirv.Return
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir
index 24c3aebdd0fc..2d74022b3440 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/constant-op-to-llvm.mlir
@@ -1,61 +1,61 @@
 // RUN: mlir-opt -convert-spirv-to-llvm %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.Constant
+// spirv.Constant
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @bool_constant_scalar
-spv.func @bool_constant_scalar() "None" {
+spirv.func @bool_constant_scalar() "None" {
   // CHECK: llvm.mlir.constant(true) : i1
-  %0 = spv.Constant true
+  %0 = spirv.Constant true
   // CHECK: llvm.mlir.constant(false) : i1
-  %1 = spv.Constant false
-  spv.Return
+  %1 = spirv.Constant false
+  spirv.Return
 }
 
 // CHECK-LABEL: @bool_constant_vector
-spv.func @bool_constant_vector() "None" {
+spirv.func @bool_constant_vector() "None" {
   // CHECK: llvm.mlir.constant(dense<[true, false]> : vector<2xi1>) : vector<2xi1>
-  %0 = spv.Constant dense<[true, false]> : vector<2xi1>
+  %0 = spirv.Constant dense<[true, false]> : vector<2xi1>
   // CHECK: llvm.mlir.constant(dense<false> : vector<3xi1>) : vector<3xi1>
-  %1 = spv.Constant dense<false> : vector<3xi1>
-  spv.Return
+  %1 = spirv.Constant dense<false> : vector<3xi1>
+  spirv.Return
 }
 
 // CHECK-LABEL: @integer_constant_scalar
-spv.func @integer_constant_scalar() "None" {
+spirv.func @integer_constant_scalar() "None" {
   // CHECK: llvm.mlir.constant(0 : i8) : i8
-  %0 = spv.Constant  0 : i8
+  %0 = spirv.Constant  0 : i8
   // CHECK: llvm.mlir.constant(-5 : i64) : i64
-  %1 = spv.Constant -5 : si64
+  %1 = spirv.Constant -5 : si64
   // CHECK: llvm.mlir.constant(10 : i16) : i16
-  %2 = spv.Constant  10 : ui16
-  spv.Return
+  %2 = spirv.Constant  10 : ui16
+  spirv.Return
 }
 
 // CHECK-LABEL: @integer_constant_vector
-spv.func @integer_constant_vector() "None" {
+spirv.func @integer_constant_vector() "None" {
   // CHECK: llvm.mlir.constant(dense<[2, 3]> : vector<2xi32>) : vector<2xi32>
-  %0 = spv.Constant dense<[2, 3]> : vector<2xi32>
+  %0 = spirv.Constant dense<[2, 3]> : vector<2xi32>
   // CHECK: llvm.mlir.constant(dense<-4> : vector<2xi32>) : vector<2xi32>
-  %1 = spv.Constant dense<-4> : vector<2xsi32>
+  %1 = spirv.Constant dense<-4> : vector<2xsi32>
   // CHECK: llvm.mlir.constant(dense<[2, 3, 4]> : vector<3xi32>) : vector<3xi32>
-  %2 = spv.Constant dense<[2, 3, 4]> : vector<3xui32>
-  spv.Return
+  %2 = spirv.Constant dense<[2, 3, 4]> : vector<3xui32>
+  spirv.Return
 }
 
 // CHECK-LABEL: @float_constant_scalar
-spv.func @float_constant_scalar() "None" {
+spirv.func @float_constant_scalar() "None" {
   // CHECK: llvm.mlir.constant(5.000000e+00 : f16) : f16
-  %0 = spv.Constant 5.000000e+00 : f16
+  %0 = spirv.Constant 5.000000e+00 : f16
   // CHECK: llvm.mlir.constant(5.000000e+00 : f64) : f64
-  %1 = spv.Constant 5.000000e+00 : f64
-  spv.Return
+  %1 = spirv.Constant 5.000000e+00 : f64
+  spirv.Return
 }
 
 // CHECK-LABEL: @float_constant_vector
-spv.func @float_constant_vector() "None" {
+spirv.func @float_constant_vector() "None" {
   // CHECK: llvm.mlir.constant(dense<[2.000000e+00, 3.000000e+00]> : vector<2xf32>) : vector<2xf32>
-  %0 = spv.Constant dense<[2.000000e+00, 3.000000e+00]> : vector<2xf32>
-  spv.Return
+  %0 = spirv.Constant dense<[2.000000e+00, 3.000000e+00]> : vector<2xf32>
+  spirv.Return
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/control-flow-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/control-flow-ops-to-llvm.mlir
index 54d059604edf..3cb727e7dbaa 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/control-flow-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/control-flow-ops-to-llvm.mlir
@@ -1,93 +1,93 @@
 // RUN: mlir-opt -convert-spirv-to-llvm -split-input-file -verify-diagnostics %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.Branch
+// spirv.Branch
 //===----------------------------------------------------------------------===//
 
-spv.module Logical GLSL450 {
-  spv.func @branch_without_arguments() -> () "None" {
+spirv.module Logical GLSL450 {
+  spirv.func @branch_without_arguments() -> () "None" {
 	  // CHECK: llvm.br ^bb1
-    spv.Branch ^label
+    spirv.Branch ^label
   // CHECK: ^bb1
   ^label:
-    spv.Return
+    spirv.Return
   }
 
-  spv.func @branch_with_arguments() -> () "None" {
-    %0 = spv.Constant 0 : i32
-    %1 = spv.Constant true
+  spirv.func @branch_with_arguments() -> () "None" {
+    %0 = spirv.Constant 0 : i32
+    %1 = spirv.Constant true
     // CHECK: llvm.br ^bb1(%{{.*}}, %{{.*}} : i32, i1)
-    spv.Branch ^label(%0, %1: i32, i1)
+    spirv.Branch ^label(%0, %1: i32, i1)
   // CHECK: ^bb1(%{{.*}}: i32, %{{.*}}: i1)
   ^label(%arg0: i32, %arg1: i1):
-    spv.Return
+    spirv.Return
   }
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.BranchConditional
+// spirv.BranchConditional
 //===----------------------------------------------------------------------===//
 
-spv.module Logical GLSL450 {
-  spv.func @cond_branch_without_arguments() -> () "None" {
+spirv.module Logical GLSL450 {
+  spirv.func @cond_branch_without_arguments() -> () "None" {
     // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : i1
-    %cond = spv.Constant true
+    %cond = spirv.Constant true
-    // CHECK: lvm.cond_br %[[COND]], ^bb1, ^bb2
+    // CHECK: llvm.cond_br %[[COND]], ^bb1, ^bb2
-    spv.BranchConditional %cond, ^true, ^false
+    spirv.BranchConditional %cond, ^true, ^false
     // CHECK: ^bb1:
   ^true:
-    spv.Return
+    spirv.Return
     // CHECK: ^bb2:
   ^false:
-    spv.Return
+    spirv.Return
   }
 
-  spv.func @cond_branch_with_arguments_nested() -> () "None" {
+  spirv.func @cond_branch_with_arguments_nested() -> () "None" {
     // CHECK: %[[COND1:.*]] = llvm.mlir.constant(true) : i1
-    %cond = spv.Constant true
-    %0 = spv.Constant 0 : i32
+    %cond = spirv.Constant true
+    %0 = spirv.Constant 0 : i32
     // CHECK: %[[COND2:.*]] = llvm.mlir.constant(false) : i1
-    %false = spv.Constant false
+    %false = spirv.Constant false
     // CHECK: llvm.cond_br %[[COND1]], ^bb1(%{{.*}}, %[[COND2]] : i32, i1), ^bb2
-    spv.BranchConditional %cond, ^outer_true(%0, %false: i32, i1), ^outer_false
+    spirv.BranchConditional %cond, ^outer_true(%0, %false: i32, i1), ^outer_false
   // CHECK: ^bb1(%{{.*}}: i32, %[[COND:.*]]: i1):
   ^outer_true(%arg0: i32, %arg1: i1):
     // CHECK: llvm.cond_br %[[COND]], ^bb3, ^bb4(%{{.*}}, %{{.*}} : i32, i32)
-    spv.BranchConditional %arg1, ^inner_true, ^inner_false(%arg0, %arg0: i32, i32)
+    spirv.BranchConditional %arg1, ^inner_true, ^inner_false(%arg0, %arg0: i32, i32)
   // CHECK: ^bb2:
   ^outer_false:
-    spv.Return
+    spirv.Return
   // CHECK: ^bb3:
   ^inner_true:
-    spv.Return
+    spirv.Return
   // CHECK: ^bb4(%{{.*}}: i32, %{{.*}}: i32):
   ^inner_false(%arg3: i32, %arg4: i32):
-    spv.Return
+    spirv.Return
   }
 
-  spv.func @cond_branch_with_weights(%cond: i1) -> () "None" {
+  spirv.func @cond_branch_with_weights(%cond: i1) -> () "None" {
     // CHECK: llvm.cond_br %{{.*}} weights(dense<[1, 2]> : vector<2xi32>), ^bb1, ^bb2
-    spv.BranchConditional %cond [1, 2], ^true, ^false
+    spirv.BranchConditional %cond [1, 2], ^true, ^false
   // CHECK: ^bb1:
   ^true:
-    spv.Return
+    spirv.Return
   // CHECK: ^bb2:
   ^false:
-    spv.Return
+    spirv.Return
   }
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.mlir.loop
+// spirv.mlir.loop
 //===----------------------------------------------------------------------===//
 
-spv.module Logical GLSL450 {
+spirv.module Logical GLSL450 {
   // CHECK-LABEL: @infinite_loop
-  spv.func @infinite_loop(%count : i32) -> () "None" {
+  spirv.func @infinite_loop(%count : i32) -> () "None" {
     // CHECK:   llvm.br ^[[BB1:.*]]
     // CHECK: ^[[BB1]]:
     // CHECK:   %[[COND:.*]] = llvm.mlir.constant(true) : i1
@@ -100,110 +100,110 @@ spv.module Logical GLSL450 {
     // CHECK:   llvm.br ^[[BB5:.*]]
     // CHECK: ^[[BB5]]:
     // CHECK:   llvm.return
-    spv.mlir.loop {
-      spv.Branch ^header
+    spirv.mlir.loop {
+      spirv.Branch ^header
     ^header:
-      %cond = spv.Constant true
-      spv.BranchConditional %cond, ^body, ^merge
+      %cond = spirv.Constant true
+      spirv.BranchConditional %cond, ^body, ^merge
     ^body:
       // Do nothing
-      spv.Branch ^continue
+      spirv.Branch ^continue
     ^continue:
       // Do nothing
-      spv.Branch ^header
+      spirv.Branch ^header
     ^merge:
-      spv.mlir.merge
+      spirv.mlir.merge
     }
-    spv.Return
+    spirv.Return
   }
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.mlir.selection
+// spirv.mlir.selection
 //===----------------------------------------------------------------------===//
 
-spv.module Logical GLSL450 {
-  spv.func @selection_empty() -> () "None" {
+spirv.module Logical GLSL450 {
+  spirv.func @selection_empty() -> () "None" {
     // CHECK: llvm.return
-    spv.mlir.selection {
+    spirv.mlir.selection {
     }
-    spv.Return
+    spirv.Return
   }
 
-  spv.func @selection_with_merge_block_only() -> () "None" {
-    %cond = spv.Constant true
+  spirv.func @selection_with_merge_block_only() -> () "None" {
+    %cond = spirv.Constant true
     // CHECK: llvm.return
-    spv.mlir.selection {
-      spv.BranchConditional %cond, ^merge, ^merge
+    spirv.mlir.selection {
+      spirv.BranchConditional %cond, ^merge, ^merge
     ^merge:
-      spv.mlir.merge
+      spirv.mlir.merge
     }
-    spv.Return
+    spirv.Return
   }
 
-  spv.func @selection_with_true_block_only() -> () "None" {
+  spirv.func @selection_with_true_block_only() -> () "None" {
     // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : i1
-    %cond = spv.Constant true
+    %cond = spirv.Constant true
     // CHECK: llvm.cond_br %[[COND]], ^bb1, ^bb2
-    spv.mlir.selection {
-      spv.BranchConditional %cond, ^true, ^merge
+    spirv.mlir.selection {
+      spirv.BranchConditional %cond, ^true, ^merge
     // CHECK: ^bb1:
     ^true:
     // CHECK: llvm.br ^bb2
-      spv.Branch ^merge
+      spirv.Branch ^merge
     // CHECK: ^bb2:
     ^merge:
       // CHECK: llvm.br ^bb3
-      spv.mlir.merge
+      spirv.mlir.merge
     }
     // CHECK: ^bb3:
     // CHECK-NEXT: llvm.return
-    spv.Return
+    spirv.Return
   }
 
-  spv.func @selection_with_both_true_and_false_block() -> () "None" {
+  spirv.func @selection_with_both_true_and_false_block() -> () "None" {
     // CHECK: %[[COND:.*]] = llvm.mlir.constant(true) : i1
-    %cond = spv.Constant true
+    %cond = spirv.Constant true
     // CHECK: llvm.cond_br %[[COND]], ^bb1, ^bb2
-    spv.mlir.selection {
-      spv.BranchConditional %cond, ^true, ^false
+    spirv.mlir.selection {
+      spirv.BranchConditional %cond, ^true, ^false
     // CHECK: ^bb1:
     ^true:
     // CHECK: llvm.br ^bb3
-      spv.Branch ^merge
+      spirv.Branch ^merge
     // CHECK: ^bb2:
     ^false:
     // CHECK: llvm.br ^bb3
-      spv.Branch ^merge
+      spirv.Branch ^merge
     // CHECK: ^bb3:
     ^merge:
       // CHECK: llvm.br ^bb4
-      spv.mlir.merge
+      spirv.mlir.merge
     }
     // CHECK: ^bb4:
     // CHECK-NEXT: llvm.return
-    spv.Return
+    spirv.Return
   }
 
-  spv.func @selection_with_early_return(%arg0: i1) -> i32 "None" {
+  spirv.func @selection_with_early_return(%arg0: i1) -> i32 "None" {
     // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
-    %0 = spv.Constant 0 : i32
+    %0 = spirv.Constant 0 : i32
     // CHECK: llvm.cond_br %{{.*}}, ^bb1(%[[ZERO]] : i32), ^bb2
-    spv.mlir.selection {
-      spv.BranchConditional %arg0, ^true(%0 : i32), ^merge
+    spirv.mlir.selection {
+      spirv.BranchConditional %arg0, ^true(%0 : i32), ^merge
     // CHECK: ^bb1(%[[ARG:.*]]: i32):
     ^true(%arg1: i32):
       // CHECK: llvm.return %[[ARG]] : i32
-      spv.ReturnValue %arg1 : i32
+      spirv.ReturnValue %arg1 : i32
     // CHECK: ^bb2:
     ^merge:
       // CHECK: llvm.br ^bb3
-      spv.mlir.merge
+      spirv.mlir.merge
     }
     // CHECK: ^bb3:
-    %one = spv.Constant 1 : i32
-    spv.ReturnValue %one : i32
+    %one = spirv.Constant 1 : i32
+    spirv.ReturnValue %one : i32
   }
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir
index e196a304a872..5b3d8ba5ca59 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/func-ops-to-llvm.mlir
@@ -1,95 +1,95 @@
 // RUN: mlir-opt -convert-spirv-to-llvm %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.Return
+// spirv.Return
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @return
-spv.func @return() "None" {
+spirv.func @return() "None" {
   // CHECK: llvm.return
-  spv.Return
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ReturnValue
+// spirv.ReturnValue
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @return_value
-spv.func @return_value(%arg: i32) -> i32 "None" {
+spirv.func @return_value(%arg: i32) -> i32 "None" {
   // CHECK: llvm.return %{{.*}} : i32
-  spv.ReturnValue %arg : i32
+  spirv.ReturnValue %arg : i32
 }
 
 //===----------------------------------------------------------------------===//
-// spv.func
+// spirv.func
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: llvm.func @none()
-spv.func @none() "None" {
-  spv.Return
+spirv.func @none() "None" {
+  spirv.Return
 }
 
 // CHECK-LABEL: llvm.func @inline() attributes {passthrough = ["alwaysinline"]}
-spv.func @inline() "Inline" {
-  spv.Return
+spirv.func @inline() "Inline" {
+  spirv.Return
 }
 
 // CHECK-LABEL: llvm.func @dont_inline() attributes {passthrough = ["noinline"]}
-spv.func @dont_inline() "DontInline" {
-  spv.Return
+spirv.func @dont_inline() "DontInline" {
+  spirv.Return
 }
 
 // CHECK-LABEL: llvm.func @pure() attributes {passthrough = ["readonly"]}
-spv.func @pure() "Pure" {
-  spv.Return
+spirv.func @pure() "Pure" {
+  spirv.Return
 }
 
 // CHECK-LABEL: llvm.func @const() attributes {passthrough = ["readnone"]}
-spv.func @const() "Const" {
-  spv.Return
+spirv.func @const() "Const" {
+  spirv.Return
 }
 
 // CHECK-LABEL: llvm.func @scalar_types(%arg0: i32, %arg1: i1, %arg2: f64, %arg3: f32)
-spv.func @scalar_types(%arg0: i32, %arg1: i1, %arg2: f64, %arg3: f32) "None" {
-  spv.Return
+spirv.func @scalar_types(%arg0: i32, %arg1: i1, %arg2: f64, %arg3: f32) "None" {
+  spirv.Return
 }
 
 // CHECK-LABEL: llvm.func @vector_types(%arg0: vector<2xi64>, %arg1: vector<2xi64>) -> vector<2xi64>
-spv.func @vector_types(%arg0: vector<2xi64>, %arg1: vector<2xi64>) -> vector<2xi64> "None" {
-  %0 = spv.IAdd %arg0, %arg1 : vector<2xi64>
-  spv.ReturnValue %0 : vector<2xi64>
+spirv.func @vector_types(%arg0: vector<2xi64>, %arg1: vector<2xi64>) -> vector<2xi64> "None" {
+  %0 = spirv.IAdd %arg0, %arg1 : vector<2xi64>
+  spirv.ReturnValue %0 : vector<2xi64>
 }
 
 //===----------------------------------------------------------------------===//
-// spv.FunctionCall
+// spirv.FunctionCall
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: llvm.func @function_calls
 // CHECK-SAME: %[[ARG0:.*]]: i32, %[[ARG1:.*]]: i1, %[[ARG2:.*]]: f64, %[[ARG3:.*]]: vector<2xi64>, %[[ARG4:.*]]: vector<2xf32>
-spv.func @function_calls(%arg0: i32, %arg1: i1, %arg2: f64, %arg3: vector<2xi64>, %arg4: vector<2xf32>) "None" {
+spirv.func @function_calls(%arg0: i32, %arg1: i1, %arg2: f64, %arg3: vector<2xi64>, %arg4: vector<2xf32>) "None" {
   // CHECK: llvm.call @void_1() : () -> ()
   // CHECK: llvm.call @void_2(%[[ARG3]]) : (vector<2xi64>) -> ()
   // CHECK: llvm.call @value_scalar(%[[ARG0]], %[[ARG1]], %[[ARG2]]) : (i32, i1, f64) -> i32
   // CHECK: llvm.call @value_vector(%[[ARG3]], %[[ARG4]]) : (vector<2xi64>, vector<2xf32>) -> vector<2xf32>
-  spv.FunctionCall @void_1() : () -> ()
-  spv.FunctionCall @void_2(%arg3) : (vector<2xi64>) -> ()
-  %0 = spv.FunctionCall @value_scalar(%arg0, %arg1, %arg2) : (i32, i1, f64) -> i32
-  %1 = spv.FunctionCall @value_vector(%arg3, %arg4) : (vector<2xi64>, vector<2xf32>) -> vector<2xf32>
-  spv.Return
+  spirv.FunctionCall @void_1() : () -> ()
+  spirv.FunctionCall @void_2(%arg3) : (vector<2xi64>) -> ()
+  %0 = spirv.FunctionCall @value_scalar(%arg0, %arg1, %arg2) : (i32, i1, f64) -> i32
+  %1 = spirv.FunctionCall @value_vector(%arg3, %arg4) : (vector<2xi64>, vector<2xf32>) -> vector<2xf32>
+  spirv.Return
 }
 
-spv.func @void_1() "None" {
-  spv.Return
+spirv.func @void_1() "None" {
+  spirv.Return
 }
 
-spv.func @void_2(%arg0: vector<2xi64>) "None" {
-  spv.Return
+spirv.func @void_2(%arg0: vector<2xi64>) "None" {
+  spirv.Return
 }
 
-spv.func @value_scalar(%arg0: i32, %arg1: i1, %arg2: f64) -> i32 "None" {
-  spv.ReturnValue %arg0: i32
+spirv.func @value_scalar(%arg0: i32, %arg1: i1, %arg2: f64) -> i32 "None" {
+  spirv.ReturnValue %arg0: i32
 }
 
-spv.func @value_vector(%arg0: vector<2xi64>, %arg1: vector<2xf32>) -> vector<2xf32> "None" {
-  spv.ReturnValue %arg1: vector<2xf32>
+spirv.func @value_vector(%arg0: vector<2xi64>, %arg1: vector<2xf32>) -> vector<2xf32> "None" {
+  spirv.ReturnValue %arg1: vector<2xf32>
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/gl-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/gl-ops-to-llvm.mlir
index 0caa9a4e5ee8..dffa7dd0f392 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/gl-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/gl-ops-to-llvm.mlir
@@ -1,180 +1,180 @@
 // RUN: mlir-opt -convert-spirv-to-llvm %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.GL.Ceil
+// spirv.GL.Ceil
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @ceil
-spv.func @ceil(%arg0: f32, %arg1: vector<3xf16>) "None" {
+spirv.func @ceil(%arg0: f32, %arg1: vector<3xf16>) "None" {
   // CHECK: "llvm.intr.ceil"(%{{.*}}) : (f32) -> f32
-  %0 = spv.GL.Ceil %arg0 : f32
+  %0 = spirv.GL.Ceil %arg0 : f32
   // CHECK: "llvm.intr.ceil"(%{{.*}}) : (vector<3xf16>) -> vector<3xf16>
-  %1 = spv.GL.Ceil %arg1 : vector<3xf16>
-  spv.Return
+  %1 = spirv.GL.Ceil %arg1 : vector<3xf16>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GL.Cos
+// spirv.GL.Cos
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @cos
-spv.func @cos(%arg0: f32, %arg1: vector<3xf16>) "None" {
+spirv.func @cos(%arg0: f32, %arg1: vector<3xf16>) "None" {
   // CHECK: "llvm.intr.cos"(%{{.*}}) : (f32) -> f32
-  %0 = spv.GL.Cos %arg0 : f32
+  %0 = spirv.GL.Cos %arg0 : f32
   // CHECK: "llvm.intr.cos"(%{{.*}}) : (vector<3xf16>) -> vector<3xf16>
-  %1 = spv.GL.Cos %arg1 : vector<3xf16>
-  spv.Return
+  %1 = spirv.GL.Cos %arg1 : vector<3xf16>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GL.Exp
+// spirv.GL.Exp
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @exp
-spv.func @exp(%arg0: f32, %arg1: vector<3xf16>) "None" {
+spirv.func @exp(%arg0: f32, %arg1: vector<3xf16>) "None" {
   // CHECK: "llvm.intr.exp"(%{{.*}}) : (f32) -> f32
-  %0 = spv.GL.Exp %arg0 : f32
+  %0 = spirv.GL.Exp %arg0 : f32
   // CHECK: "llvm.intr.exp"(%{{.*}}) : (vector<3xf16>) -> vector<3xf16>
-  %1 = spv.GL.Exp %arg1 : vector<3xf16>
-  spv.Return
+  %1 = spirv.GL.Exp %arg1 : vector<3xf16>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GL.FAbs
+// spirv.GL.FAbs
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @fabs
-spv.func @fabs(%arg0: f32, %arg1: vector<3xf16>) "None" {
+spirv.func @fabs(%arg0: f32, %arg1: vector<3xf16>) "None" {
   // CHECK: "llvm.intr.fabs"(%{{.*}}) : (f32) -> f32
-  %0 = spv.GL.FAbs %arg0 : f32
+  %0 = spirv.GL.FAbs %arg0 : f32
   // CHECK: "llvm.intr.fabs"(%{{.*}}) : (vector<3xf16>) -> vector<3xf16>
-  %1 = spv.GL.FAbs %arg1 : vector<3xf16>
-  spv.Return
+  %1 = spirv.GL.FAbs %arg1 : vector<3xf16>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GL.Floor
+// spirv.GL.Floor
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @floor
-spv.func @floor(%arg0: f32, %arg1: vector<3xf16>) "None" {
+spirv.func @floor(%arg0: f32, %arg1: vector<3xf16>) "None" {
   // CHECK: "llvm.intr.floor"(%{{.*}}) : (f32) -> f32
-  %0 = spv.GL.Floor %arg0 : f32
+  %0 = spirv.GL.Floor %arg0 : f32
   // CHECK: "llvm.intr.floor"(%{{.*}}) : (vector<3xf16>) -> vector<3xf16>
-  %1 = spv.GL.Floor %arg1 : vector<3xf16>
-  spv.Return
+  %1 = spirv.GL.Floor %arg1 : vector<3xf16>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GL.FMax
+// spirv.GL.FMax
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @fmax
-spv.func @fmax(%arg0: f32, %arg1: vector<3xf16>) "None" {
+spirv.func @fmax(%arg0: f32, %arg1: vector<3xf16>) "None" {
   // CHECK: "llvm.intr.maxnum"(%{{.*}}, %{{.*}}) : (f32, f32) -> f32
-  %0 = spv.GL.FMax %arg0, %arg0 : f32
+  %0 = spirv.GL.FMax %arg0, %arg0 : f32
   // CHECK: "llvm.intr.maxnum"(%{{.*}}, %{{.*}}) : (vector<3xf16>, vector<3xf16>) -> vector<3xf16>
-  %1 = spv.GL.FMax %arg1, %arg1 : vector<3xf16>
-  spv.Return
+  %1 = spirv.GL.FMax %arg1, %arg1 : vector<3xf16>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GL.FMin
+// spirv.GL.FMin
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @fmin
-spv.func @fmin(%arg0: f32, %arg1: vector<3xf16>) "None" {
+spirv.func @fmin(%arg0: f32, %arg1: vector<3xf16>) "None" {
   // CHECK: "llvm.intr.minnum"(%{{.*}}, %{{.*}}) : (f32, f32) -> f32
-  %0 = spv.GL.FMin %arg0, %arg0 : f32
+  %0 = spirv.GL.FMin %arg0, %arg0 : f32
   // CHECK: "llvm.intr.minnum"(%{{.*}}, %{{.*}}) : (vector<3xf16>, vector<3xf16>) -> vector<3xf16>
-  %1 = spv.GL.FMin %arg1, %arg1 : vector<3xf16>
-  spv.Return
+  %1 = spirv.GL.FMin %arg1, %arg1 : vector<3xf16>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GL.Log
+// spirv.GL.Log
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @log
-spv.func @log(%arg0: f32, %arg1: vector<3xf16>) "None" {
+spirv.func @log(%arg0: f32, %arg1: vector<3xf16>) "None" {
   // CHECK: "llvm.intr.log"(%{{.*}}) : (f32) -> f32
-  %0 = spv.GL.Log %arg0 : f32
+  %0 = spirv.GL.Log %arg0 : f32
   // CHECK: "llvm.intr.log"(%{{.*}}) : (vector<3xf16>) -> vector<3xf16>
-  %1 = spv.GL.Log %arg1 : vector<3xf16>
-  spv.Return
+  %1 = spirv.GL.Log %arg1 : vector<3xf16>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GL.Sin
+// spirv.GL.Sin
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @sin
-spv.func @sin(%arg0: f32, %arg1: vector<3xf16>) "None" {
+spirv.func @sin(%arg0: f32, %arg1: vector<3xf16>) "None" {
   // CHECK: "llvm.intr.sin"(%{{.*}}) : (f32) -> f32
-  %0 = spv.GL.Sin %arg0 : f32
+  %0 = spirv.GL.Sin %arg0 : f32
   // CHECK: "llvm.intr.sin"(%{{.*}}) : (vector<3xf16>) -> vector<3xf16>
-  %1 = spv.GL.Sin %arg1 : vector<3xf16>
-  spv.Return
+  %1 = spirv.GL.Sin %arg1 : vector<3xf16>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GL.SMax
+// spirv.GL.SMax
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @smax
-spv.func @smax(%arg0: i16, %arg1: vector<3xi32>) "None" {
+spirv.func @smax(%arg0: i16, %arg1: vector<3xi32>) "None" {
   // CHECK: "llvm.intr.smax"(%{{.*}}, %{{.*}}) : (i16, i16) -> i16
-  %0 = spv.GL.SMax %arg0, %arg0 : i16
+  %0 = spirv.GL.SMax %arg0, %arg0 : i16
   // CHECK: "llvm.intr.smax"(%{{.*}}, %{{.*}}) : (vector<3xi32>, vector<3xi32>) -> vector<3xi32>
-  %1 = spv.GL.SMax %arg1, %arg1 : vector<3xi32>
-  spv.Return
+  %1 = spirv.GL.SMax %arg1, %arg1 : vector<3xi32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GL.SMin
+// spirv.GL.SMin
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @smin
-spv.func @smin(%arg0: i16, %arg1: vector<3xi32>) "None" {
+spirv.func @smin(%arg0: i16, %arg1: vector<3xi32>) "None" {
   // CHECK: "llvm.intr.smin"(%{{.*}}, %{{.*}}) : (i16, i16) -> i16
-  %0 = spv.GL.SMin %arg0, %arg0 : i16
+  %0 = spirv.GL.SMin %arg0, %arg0 : i16
   // CHECK: "llvm.intr.smin"(%{{.*}}, %{{.*}}) : (vector<3xi32>, vector<3xi32>) -> vector<3xi32>
-  %1 = spv.GL.SMin %arg1, %arg1 : vector<3xi32>
-  spv.Return
+  %1 = spirv.GL.SMin %arg1, %arg1 : vector<3xi32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GL.Sqrt
+// spirv.GL.Sqrt
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @sqrt
-spv.func @sqrt(%arg0: f32, %arg1: vector<3xf16>) "None" {
+spirv.func @sqrt(%arg0: f32, %arg1: vector<3xf16>) "None" {
   // CHECK: "llvm.intr.sqrt"(%{{.*}}) : (f32) -> f32
-  %0 = spv.GL.Sqrt %arg0 : f32
+  %0 = spirv.GL.Sqrt %arg0 : f32
   // CHECK: "llvm.intr.sqrt"(%{{.*}}) : (vector<3xf16>) -> vector<3xf16>
-  %1 = spv.GL.Sqrt %arg1 : vector<3xf16>
-  spv.Return
+  %1 = spirv.GL.Sqrt %arg1 : vector<3xf16>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GL.Tan
+// spirv.GL.Tan
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @tan
-spv.func @tan(%arg0: f32) "None" {
+spirv.func @tan(%arg0: f32) "None" {
   // CHECK: %[[SIN:.*]] = "llvm.intr.sin"(%{{.*}}) : (f32) -> f32
   // CHECK: %[[COS:.*]] = "llvm.intr.cos"(%{{.*}}) : (f32) -> f32
   // CHECK: llvm.fdiv %[[SIN]], %[[COS]] : f32
-  %0 = spv.GL.Tan %arg0 : f32
-  spv.Return
+  %0 = spirv.GL.Tan %arg0 : f32
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GL.Tanh
+// spirv.GL.Tanh
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @tanh
-spv.func @tanh(%arg0: f32) "None" {
+spirv.func @tanh(%arg0: f32) "None" {
   // CHECK: %[[TWO:.*]] = llvm.mlir.constant(2.000000e+00 : f32) : f32
   // CHECK: %[[X2:.*]] = llvm.fmul %[[TWO]], %{{.*}} : f32
   // CHECK: %[[EXP:.*]] = "llvm.intr.exp"(%[[X2]]) : (f32) -> f32
@@ -182,19 +182,19 @@ spv.func @tanh(%arg0: f32) "None" {
   // CHECK: %[[T0:.*]] = llvm.fsub %[[EXP]], %[[ONE]] : f32
   // CHECK: %[[T1:.*]] = llvm.fadd %[[EXP]], %[[ONE]] : f32
   // CHECK: llvm.fdiv %[[T0]], %[[T1]] : f32
-  %0 = spv.GL.Tanh %arg0 : f32
-  spv.Return
+  %0 = spirv.GL.Tanh %arg0 : f32
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GL.InverseSqrt
+// spirv.GL.InverseSqrt
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @inverse_sqrt
-spv.func @inverse_sqrt(%arg0: f32) "None" {
+spirv.func @inverse_sqrt(%arg0: f32) "None" {
   // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1.000000e+00 : f32) : f32
   // CHECK: %[[SQRT:.*]] = "llvm.intr.sqrt"(%{{.*}}) : (f32) -> f32
   // CHECK: llvm.fdiv %[[ONE]], %[[SQRT]] : f32
-  %0 = spv.GL.InverseSqrt %arg0 : f32
-  spv.Return
+  %0 = spirv.GL.InverseSqrt %arg0 : f32
+  spirv.Return
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/logical-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/logical-ops-to-llvm.mlir
index d052a3bf406c..6d93480d3ed1 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/logical-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/logical-ops-to-llvm.mlir
@@ -1,93 +1,93 @@
 // RUN: mlir-opt -convert-spirv-to-llvm %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.LogicalEqual
+// spirv.LogicalEqual
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @logical_equal_scalar
-spv.func @logical_equal_scalar(%arg0: i1, %arg1: i1) "None" {
+spirv.func @logical_equal_scalar(%arg0: i1, %arg1: i1) "None" {
   // CHECK: llvm.icmp "eq" %{{.*}}, %{{.*}} : i1
-  %0 = spv.LogicalEqual %arg0, %arg0 : i1
-  spv.Return
+  %0 = spirv.LogicalEqual %arg0, %arg0 : i1
+  spirv.Return
 }
 
 // CHECK-LABEL: @logical_equal_vector
-spv.func @logical_equal_vector(%arg0: vector<4xi1>, %arg1: vector<4xi1>) "None" {
+spirv.func @logical_equal_vector(%arg0: vector<4xi1>, %arg1: vector<4xi1>) "None" {
   // CHECK: llvm.icmp "eq" %{{.*}}, %{{.*}} : vector<4xi1>
-  %0 = spv.LogicalEqual %arg0, %arg0 : vector<4xi1>
-  spv.Return
+  %0 = spirv.LogicalEqual %arg0, %arg0 : vector<4xi1>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.LogicalNotEqual
+// spirv.LogicalNotEqual
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @logical_not_equal_scalar
-spv.func @logical_not_equal_scalar(%arg0: i1, %arg1: i1) "None" {
+spirv.func @logical_not_equal_scalar(%arg0: i1, %arg1: i1) "None" {
   // CHECK: llvm.icmp "ne" %{{.*}}, %{{.*}} : i1
-  %0 = spv.LogicalNotEqual %arg0, %arg0 : i1
-  spv.Return
+  %0 = spirv.LogicalNotEqual %arg0, %arg0 : i1
+  spirv.Return
 }
 
 // CHECK-LABEL: @logical_not_equal_vector
-spv.func @logical_not_equal_vector(%arg0: vector<4xi1>, %arg1: vector<4xi1>) "None" {
+spirv.func @logical_not_equal_vector(%arg0: vector<4xi1>, %arg1: vector<4xi1>) "None" {
   // CHECK: llvm.icmp "ne" %{{.*}}, %{{.*}} : vector<4xi1>
-  %0 = spv.LogicalNotEqual %arg0, %arg0 : vector<4xi1>
-  spv.Return
+  %0 = spirv.LogicalNotEqual %arg0, %arg0 : vector<4xi1>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.LogicalNot
+// spirv.LogicalNot
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @logical_not_scalar
-spv.func @logical_not_scalar(%arg0: i1) "None" {
+spirv.func @logical_not_scalar(%arg0: i1) "None" {
   // CHECK: %[[CONST:.*]] = llvm.mlir.constant(true) : i1
   // CHECK: llvm.xor %{{.*}}, %[[CONST]] : i1
-  %0 = spv.LogicalNot %arg0 : i1
-  spv.Return
+  %0 = spirv.LogicalNot %arg0 : i1
+  spirv.Return
 }
 
 // CHECK-LABEL: @logical_not_vector
-spv.func @logical_not_vector(%arg0: vector<4xi1>) "None" {
+spirv.func @logical_not_vector(%arg0: vector<4xi1>) "None" {
   // CHECK: %[[CONST:.*]] = llvm.mlir.constant(dense<true> : vector<4xi1>) : vector<4xi1>
   // CHECK: llvm.xor %{{.*}}, %[[CONST]] : vector<4xi1>
-  %0 = spv.LogicalNot %arg0 : vector<4xi1>
-  spv.Return
+  %0 = spirv.LogicalNot %arg0 : vector<4xi1>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.LogicalAnd
+// spirv.LogicalAnd
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @logical_and_scalar
-spv.func @logical_and_scalar(%arg0: i1, %arg1: i1) "None" {
+spirv.func @logical_and_scalar(%arg0: i1, %arg1: i1) "None" {
   // CHECK: llvm.and %{{.*}}, %{{.*}} : i1
-  %0 = spv.LogicalAnd %arg0, %arg0 : i1
-  spv.Return
+  %0 = spirv.LogicalAnd %arg0, %arg0 : i1
+  spirv.Return
 }
 
 // CHECK-LABEL: @logical_and_vector
-spv.func @logical_and_vector(%arg0: vector<4xi1>, %arg1: vector<4xi1>) "None" {
+spirv.func @logical_and_vector(%arg0: vector<4xi1>, %arg1: vector<4xi1>) "None" {
   // CHECK: llvm.and %{{.*}}, %{{.*}} : vector<4xi1>
-  %0 = spv.LogicalAnd %arg0, %arg0 : vector<4xi1>
-  spv.Return
+  %0 = spirv.LogicalAnd %arg0, %arg0 : vector<4xi1>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.LogicalOr
+// spirv.LogicalOr
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @logical_or_scalar
-spv.func @logical_or_scalar(%arg0: i1, %arg1: i1) "None" {
+spirv.func @logical_or_scalar(%arg0: i1, %arg1: i1) "None" {
   // CHECK: llvm.or %{{.*}}, %{{.*}} : i1
-  %0 = spv.LogicalOr %arg0, %arg0 : i1
-  spv.Return
+  %0 = spirv.LogicalOr %arg0, %arg0 : i1
+  spirv.Return
 }
 
 // CHECK-LABEL: @logical_or_vector
-spv.func @logical_or_vector(%arg0: vector<4xi1>, %arg1: vector<4xi1>) "None" {
+spirv.func @logical_or_vector(%arg0: vector<4xi1>, %arg1: vector<4xi1>) "None" {
   // CHECK: llvm.or %{{.*}}, %{{.*}} : vector<4xi1>
-  %0 = spv.LogicalOr %arg0, %arg0 : vector<4xi1>
-  spv.Return
+  %0 = spirv.LogicalOr %arg0, %arg0 : vector<4xi1>
+  spirv.Return
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir b/mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir
index 984b5184bbab..2c48194d1369 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/lower-host-to-llvm-calls.mlir
@@ -1,16 +1,16 @@
 // RUN: mlir-opt --lower-host-to-llvm %s | FileCheck %s
 
-module attributes {gpu.container_module, spv.target_env = #spv.target_env<#spv.vce<v1.0, [Shader], [SPV_KHR_variable_pointers]>, #spv.resource_limits<max_compute_workgroup_invocations = 128, max_compute_workgroup_size = [128, 128, 64]>>} {
+module attributes {gpu.container_module, spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Shader], [SPV_KHR_variable_pointers]>, #spirv.resource_limits<max_compute_workgroup_invocations = 128, max_compute_workgroup_size = [128, 128, 64]>>} {
 
   //       CHECK: llvm.mlir.global linkonce @__spv__foo_bar_arg_0_descriptor_set0_binding0() {addr_space = 0 : i32} : !llvm.struct<(array<6 x i32>)>
   //       CHECK: llvm.func @__spv__foo_bar()
 
-  //       CHECK: spv.module @__spv__foo
-  //       CHECK:   spv.GlobalVariable @bar_arg_0 bind(0, 0) : !spv.ptr<!spv.struct<(!spv.array<6 x i32, stride=4> [0])>, StorageBuffer>
-  //       CHECK:   spv.func @__spv__foo_bar
+  //       CHECK: spirv.module @__spv__foo
+  //       CHECK:   spirv.GlobalVariable @bar_arg_0 bind(0, 0) : !spirv.ptr<!spirv.struct<(!spirv.array<6 x i32, stride=4> [0])>, StorageBuffer>
+  //       CHECK:   spirv.func @__spv__foo_bar
 
-  //       CHECK:   spv.EntryPoint "GLCompute" @__spv__foo_bar
-  //       CHECK:   spv.ExecutionMode @__spv__foo_bar "LocalSize", 1, 1, 1
+  //       CHECK:   spirv.EntryPoint "GLCompute" @__spv__foo_bar
+  //       CHECK:   spirv.ExecutionMode @__spv__foo_bar "LocalSize", 1, 1, 1
 
   // CHECK-LABEL: @main
   //       CHECK:   %[[SRC:.*]] = llvm.extractvalue %{{.*}}[0] : !llvm.struct<(ptr<i32>, ptr<i32>, i64, array<1 x i64>, array<1 x i64>)>
@@ -21,18 +21,18 @@ module attributes {gpu.container_module, spv.target_env = #spv.target_env<#spv.v
   //  CHECK-NEXT:   llvm.mlir.constant(false) : i1
   //  CHECK-NEXT:   "llvm.intr.memcpy"(%[[SRC]], %[[DEST]], %[[SIZE]], %{{.*}}) : (!llvm.ptr<i32>, !llvm.ptr<struct<(array<6 x i32>)>>, i64, i1) -> ()
 
-  spv.module @__spv__foo Logical GLSL450 requires #spv.vce<v1.0, [Shader], [SPV_KHR_variable_pointers]> {
-    spv.GlobalVariable @bar_arg_0 bind(0, 0) : !spv.ptr<!spv.struct<(!spv.array<6 x i32, stride=4> [0])>, StorageBuffer>
-    spv.func @bar() "None" attributes {workgroup_attributions = 0 : i64} {
-      %0 = spv.mlir.addressof @bar_arg_0 : !spv.ptr<!spv.struct<(!spv.array<6 x i32, stride=4> [0])>, StorageBuffer>
-      spv.Return
+  spirv.module @__spv__foo Logical GLSL450 requires #spirv.vce<v1.0, [Shader], [SPV_KHR_variable_pointers]> {
+    spirv.GlobalVariable @bar_arg_0 bind(0, 0) : !spirv.ptr<!spirv.struct<(!spirv.array<6 x i32, stride=4> [0])>, StorageBuffer>
+    spirv.func @bar() "None" attributes {workgroup_attributions = 0 : i64} {
+      %0 = spirv.mlir.addressof @bar_arg_0 : !spirv.ptr<!spirv.struct<(!spirv.array<6 x i32, stride=4> [0])>, StorageBuffer>
+      spirv.Return
     }
-    spv.EntryPoint "GLCompute" @bar
-    spv.ExecutionMode @bar "LocalSize", 1, 1, 1
+    spirv.EntryPoint "GLCompute" @bar
+    spirv.ExecutionMode @bar "LocalSize", 1, 1, 1
   }
 
   gpu.module @foo {
-    gpu.func @bar(%arg0: memref<6xi32>) kernel attributes {spv.entry_point_abi = #spv.entry_point_abi<local_size = dense<1> : vector<3xi32>>} {
+    gpu.func @bar(%arg0: memref<6xi32>) kernel attributes {spirv.entry_point_abi = #spirv.entry_point_abi<local_size = dense<1> : vector<3xi32>>} {
       gpu.return
     }
   }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir
index 095a3f11a54d..acdad2225ae1 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir
@@ -1,229 +1,229 @@
 // RUN: mlir-opt -convert-spirv-to-llvm %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.AccessChain
+// spirv.AccessChain
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @access_chain
-spv.func @access_chain() "None" {
+spirv.func @access_chain() "None" {
   // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32
-  %0 = spv.Constant 1: i32
-  %1 = spv.Variable : !spv.ptr<!spv.struct<(f32, !spv.array<4xf32>)>, Function>
+  %0 = spirv.Constant 1: i32
+  %1 = spirv.Variable : !spirv.ptr<!spirv.struct<(f32, !spirv.array<4xf32>)>, Function>
   // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
   // CHECK: llvm.getelementptr %{{.*}}[%[[ZERO]], 1, %[[ONE]]] : (!llvm.ptr<struct<packed (f32, array<4 x f32>)>>, i32, i32) -> !llvm.ptr<f32>
-  %2 = spv.AccessChain %1[%0, %0] : !spv.ptr<!spv.struct<(f32, !spv.array<4xf32>)>, Function>, i32, i32
-  spv.Return
+  %2 = spirv.AccessChain %1[%0, %0] : !spirv.ptr<!spirv.struct<(f32, !spirv.array<4xf32>)>, Function>, i32, i32
+  spirv.Return
 }
 
 // CHECK-LABEL: @access_chain_array
-spv.func @access_chain_array(%arg0 : i32) "None" {
-  %0 = spv.Variable : !spv.ptr<!spv.array<4x!spv.array<4xf32>>, Function>
+spirv.func @access_chain_array(%arg0 : i32) "None" {
+  %0 = spirv.Variable : !spirv.ptr<!spirv.array<4x!spirv.array<4xf32>>, Function>
   // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
   // CHECK: llvm.getelementptr %{{.*}}[%[[ZERO]], %{{.*}}] : (!llvm.ptr<array<4 x array<4 x f32>>>, i32, i32) -> !llvm.ptr<array<4 x f32>>
-  %1 = spv.AccessChain %0[%arg0] : !spv.ptr<!spv.array<4x!spv.array<4xf32>>, Function>, i32
-  %2 = spv.Load "Function" %1 ["Volatile"] : !spv.array<4xf32>
-  spv.Return
+  %1 = spirv.AccessChain %0[%arg0] : !spirv.ptr<!spirv.array<4x!spirv.array<4xf32>>, Function>, i32
+  %2 = spirv.Load "Function" %1 ["Volatile"] : !spirv.array<4xf32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.GlobalVariable and spv.mlir.addressof
+// spirv.GlobalVariable and spirv.mlir.addressof
 //===----------------------------------------------------------------------===//
 
-spv.module Logical GLSL450 {
+spirv.module Logical GLSL450 {
   // CHECK: llvm.mlir.global external constant @var() {addr_space = 0 : i32} : f32
-  spv.GlobalVariable @var : !spv.ptr<f32, Input>
+  spirv.GlobalVariable @var : !spirv.ptr<f32, Input>
 }
 
-spv.module Logical GLSL450 {
+spirv.module Logical GLSL450 {
   //       CHECK: llvm.mlir.global private @struct() {addr_space = 0 : i32} : !llvm.struct<packed (f32, array<10 x f32>)>
   // CHECK-LABEL: @func
   //       CHECK:   llvm.mlir.addressof @struct : !llvm.ptr<struct<packed (f32, array<10 x f32>)>>
-  spv.GlobalVariable @struct : !spv.ptr<!spv.struct<(f32, !spv.array<10xf32>)>, Private>
-  spv.func @func() "None" {
-    %0 = spv.mlir.addressof @struct : !spv.ptr<!spv.struct<(f32, !spv.array<10xf32>)>, Private>
-    spv.Return
+  spirv.GlobalVariable @struct : !spirv.ptr<!spirv.struct<(f32, !spirv.array<10xf32>)>, Private>
+  spirv.func @func() "None" {
+    %0 = spirv.mlir.addressof @struct : !spirv.ptr<!spirv.struct<(f32, !spirv.array<10xf32>)>, Private>
+    spirv.Return
   }
 }
 
-spv.module Logical GLSL450 {
+spirv.module Logical GLSL450 {
   //       CHECK: llvm.mlir.global external @bar_descriptor_set0_binding0() {addr_space = 0 : i32} : i32
   // CHECK-LABEL: @foo
   //       CHECK:   llvm.mlir.addressof @bar_descriptor_set0_binding0 : !llvm.ptr<i32>
-  spv.GlobalVariable @bar bind(0, 0) : !spv.ptr<i32, StorageBuffer>
-  spv.func @foo() "None" {
-    %0 = spv.mlir.addressof @bar : !spv.ptr<i32, StorageBuffer>
-    spv.Return
+  spirv.GlobalVariable @bar bind(0, 0) : !spirv.ptr<i32, StorageBuffer>
+  spirv.func @foo() "None" {
+    %0 = spirv.mlir.addressof @bar : !spirv.ptr<i32, StorageBuffer>
+    spirv.Return
   }
 }
 
-spv.module @name Logical GLSL450 {
+spirv.module @name Logical GLSL450 {
   //       CHECK: llvm.mlir.global external @name_bar_descriptor_set0_binding0() {addr_space = 0 : i32} : i32
   // CHECK-LABEL: @foo
   //       CHECK:   llvm.mlir.addressof @name_bar_descriptor_set0_binding0 : !llvm.ptr<i32>
-  spv.GlobalVariable @bar bind(0, 0) : !spv.ptr<i32, StorageBuffer>
-  spv.func @foo() "None" {
-    %0 = spv.mlir.addressof @bar : !spv.ptr<i32, StorageBuffer>
-    spv.Return
+  spirv.GlobalVariable @bar bind(0, 0) : !spirv.ptr<i32, StorageBuffer>
+  spirv.func @foo() "None" {
+    %0 = spirv.mlir.addressof @bar : !spirv.ptr<i32, StorageBuffer>
+    spirv.Return
   }
 }
 
-spv.module Logical GLSL450 {
+spirv.module Logical GLSL450 {
   // CHECK: llvm.mlir.global external @bar() {addr_space = 0 : i32, location = 1 : i32} : i32
   // CHECK-LABEL: @foo
-  spv.GlobalVariable @bar {location = 1 : i32} : !spv.ptr<i32, Output>
-  spv.func @foo() "None" {
-    %0 = spv.mlir.addressof @bar : !spv.ptr<i32, Output>
-    spv.Return
+  spirv.GlobalVariable @bar {location = 1 : i32} : !spirv.ptr<i32, Output>
+  spirv.func @foo() "None" {
+    %0 = spirv.mlir.addressof @bar : !spirv.ptr<i32, Output>
+    spirv.Return
   }
 }
 
-spv.module Logical GLSL450 {
+spirv.module Logical GLSL450 {
   // CHECK: llvm.mlir.global external constant @bar() {addr_space = 0 : i32, location = 3 : i32} : f32
   // CHECK-LABEL: @foo
-  spv.GlobalVariable @bar {descriptor_set = 0 : i32, location = 3 : i32} : !spv.ptr<f32, UniformConstant>
-  spv.func @foo() "None" {
-    %0 = spv.mlir.addressof @bar : !spv.ptr<f32, UniformConstant>
-    spv.Return
+  spirv.GlobalVariable @bar {descriptor_set = 0 : i32, location = 3 : i32} : !spirv.ptr<f32, UniformConstant>
+  spirv.func @foo() "None" {
+    %0 = spirv.mlir.addressof @bar : !spirv.ptr<f32, UniformConstant>
+    spirv.Return
   }
 }
 
 //===----------------------------------------------------------------------===//
-// spv.Load
+// spirv.Load
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @load
-spv.func @load() "None" {
-  %0 = spv.Variable : !spv.ptr<f32, Function>
+spirv.func @load() "None" {
+  %0 = spirv.Variable : !spirv.ptr<f32, Function>
   //  CHECK: llvm.load %{{.*}} : !llvm.ptr<f32>
-  %1 = spv.Load "Function" %0 : f32
-  spv.Return
+  %1 = spirv.Load "Function" %0 : f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @load_none
-spv.func @load_none() "None" {
-  %0 = spv.Variable : !spv.ptr<f32, Function>
+spirv.func @load_none() "None" {
+  %0 = spirv.Variable : !spirv.ptr<f32, Function>
   //  CHECK: llvm.load %{{.*}} : !llvm.ptr<f32>
-  %1 = spv.Load "Function" %0 ["None"] : f32
-  spv.Return
+  %1 = spirv.Load "Function" %0 ["None"] : f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @load_with_alignment
-spv.func @load_with_alignment() "None" {
-  %0 = spv.Variable : !spv.ptr<f32, Function>
+spirv.func @load_with_alignment() "None" {
+  %0 = spirv.Variable : !spirv.ptr<f32, Function>
   // CHECK: llvm.load %{{.*}} {alignment = 4 : i64} : !llvm.ptr<f32>
-  %1 = spv.Load "Function" %0 ["Aligned", 4] : f32
-  spv.Return
+  %1 = spirv.Load "Function" %0 ["Aligned", 4] : f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @load_volatile
-spv.func @load_volatile() "None" {
-  %0 = spv.Variable : !spv.ptr<f32, Function>
+spirv.func @load_volatile() "None" {
+  %0 = spirv.Variable : !spirv.ptr<f32, Function>
   // CHECK: llvm.load volatile %{{.*}} : !llvm.ptr<f32>
-  %1 = spv.Load "Function" %0 ["Volatile"] : f32
-  spv.Return
+  %1 = spirv.Load "Function" %0 ["Volatile"] : f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @load_nontemporal
-spv.func @load_nontemporal() "None" {
-  %0 = spv.Variable : !spv.ptr<f32, Function>
+spirv.func @load_nontemporal() "None" {
+  %0 = spirv.Variable : !spirv.ptr<f32, Function>
   // CHECK: llvm.load %{{.*}} {nontemporal} : !llvm.ptr<f32>
-  %1 = spv.Load "Function" %0 ["Nontemporal"] : f32
-  spv.Return
+  %1 = spirv.Load "Function" %0 ["Nontemporal"] : f32
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.Store
+// spirv.Store
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @store
-spv.func @store(%arg0 : f32) "None" {
-  %0 = spv.Variable : !spv.ptr<f32, Function>
+spirv.func @store(%arg0 : f32) "None" {
+  %0 = spirv.Variable : !spirv.ptr<f32, Function>
   // CHECK: llvm.store %{{.*}}, %{{.*}} : !llvm.ptr<f32>
-  spv.Store "Function" %0, %arg0 : f32
-  spv.Return
+  spirv.Store "Function" %0, %arg0 : f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @store_composite
-spv.func @store_composite(%arg0 : !spv.struct<(f64)>) "None" {
-  %0 = spv.Variable : !spv.ptr<!spv.struct<(f64)>, Function>
+spirv.func @store_composite(%arg0 : !spirv.struct<(f64)>) "None" {
+  %0 = spirv.Variable : !spirv.ptr<!spirv.struct<(f64)>, Function>
   // CHECK: llvm.store %{{.*}}, %{{.*}} : !llvm.ptr<struct<packed (f64)>>
-  spv.Store "Function" %0, %arg0 : !spv.struct<(f64)>
-  spv.Return
+  spirv.Store "Function" %0, %arg0 : !spirv.struct<(f64)>
+  spirv.Return
 }
 
 // CHECK-LABEL: @store_with_alignment
-spv.func @store_with_alignment(%arg0 : f32) "None" {
-  %0 = spv.Variable : !spv.ptr<f32, Function>
+spirv.func @store_with_alignment(%arg0 : f32) "None" {
+  %0 = spirv.Variable : !spirv.ptr<f32, Function>
   // CHECK: llvm.store %{{.*}}, %{{.*}} {alignment = 4 : i64} : !llvm.ptr<f32>
-  spv.Store "Function" %0, %arg0 ["Aligned", 4] : f32
-  spv.Return
+  spirv.Store "Function" %0, %arg0 ["Aligned", 4] : f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @store_volatile
-spv.func @store_volatile(%arg0 : f32) "None" {
-  %0 = spv.Variable : !spv.ptr<f32, Function>
+spirv.func @store_volatile(%arg0 : f32) "None" {
+  %0 = spirv.Variable : !spirv.ptr<f32, Function>
   // CHECK: llvm.store volatile %{{.*}}, %{{.*}} : !llvm.ptr<f32>
-  spv.Store "Function" %0, %arg0 ["Volatile"] : f32
-  spv.Return
+  spirv.Store "Function" %0, %arg0 ["Volatile"] : f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @store_nontemporal
-spv.func @store_nontemporal(%arg0 : f32) "None" {
-  %0 = spv.Variable : !spv.ptr<f32, Function>
+spirv.func @store_nontemporal(%arg0 : f32) "None" {
+  %0 = spirv.Variable : !spirv.ptr<f32, Function>
   // CHECK: llvm.store %{{.*}}, %{{.*}} {nontemporal} : !llvm.ptr<f32>
-  spv.Store "Function" %0, %arg0 ["Nontemporal"] : f32
-  spv.Return
+  spirv.Store "Function" %0, %arg0 ["Nontemporal"] : f32
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.Variable
+// spirv.Variable
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @variable_scalar
-spv.func @variable_scalar() "None" {
+spirv.func @variable_scalar() "None" {
   // CHECK: %[[SIZE1:.*]] = llvm.mlir.constant(1 : i32) : i32
   // CHECK: llvm.alloca %[[SIZE1]] x f32 : (i32) -> !llvm.ptr<f32>
-  %0 = spv.Variable : !spv.ptr<f32, Function>
+  %0 = spirv.Variable : !spirv.ptr<f32, Function>
   // CHECK: %[[SIZE2:.*]] = llvm.mlir.constant(1 : i32) : i32
   // CHECK: llvm.alloca %[[SIZE2]] x i8 : (i32) -> !llvm.ptr<i8>
-  %1 = spv.Variable : !spv.ptr<i8, Function>
-  spv.Return
+  %1 = spirv.Variable : !spirv.ptr<i8, Function>
+  spirv.Return
 }
 
 // CHECK-LABEL: @variable_scalar_with_initialization
-spv.func @variable_scalar_with_initialization() "None" {
+spirv.func @variable_scalar_with_initialization() "None" {
   // CHECK: %[[VALUE:.*]] = llvm.mlir.constant(0 : i64) : i64
   // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : i32
   // CHECK: %[[ALLOCATED:.*]] = llvm.alloca %[[SIZE]] x i64 : (i32) -> !llvm.ptr<i64>
   // CHECK: llvm.store %[[VALUE]], %[[ALLOCATED]] : !llvm.ptr<i64>
-  %c = spv.Constant 0 : i64
-  %0 = spv.Variable init(%c) : !spv.ptr<i64, Function>
-  spv.Return
+  %c = spirv.Constant 0 : i64
+  %0 = spirv.Variable init(%c) : !spirv.ptr<i64, Function>
+  spirv.Return
 }
 
 // CHECK-LABEL: @variable_vector
-spv.func @variable_vector() "None" {
+spirv.func @variable_vector() "None" {
   // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : i32
   // CHECK: llvm.alloca  %[[SIZE]] x vector<3xf32> : (i32) -> !llvm.ptr<vector<3xf32>>
-  %0 = spv.Variable : !spv.ptr<vector<3xf32>, Function>
-  spv.Return
+  %0 = spirv.Variable : !spirv.ptr<vector<3xf32>, Function>
+  spirv.Return
 }
 
 // CHECK-LABEL: @variable_vector_with_initialization
-spv.func @variable_vector_with_initialization() "None" {
+spirv.func @variable_vector_with_initialization() "None" {
   // CHECK: %[[VALUE:.*]] = llvm.mlir.constant(dense<false> : vector<3xi1>) : vector<3xi1>
   // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : i32
   // CHECK: %[[ALLOCATED:.*]] = llvm.alloca %[[SIZE]] x vector<3xi1> : (i32) -> !llvm.ptr<vector<3xi1>>
   // CHECK: llvm.store %[[VALUE]], %[[ALLOCATED]] : !llvm.ptr<vector<3xi1>>
-  %c = spv.Constant dense<false> : vector<3xi1>
-  %0 = spv.Variable init(%c) : !spv.ptr<vector<3xi1>, Function>
-  spv.Return
+  %c = spirv.Constant dense<false> : vector<3xi1>
+  %0 = spirv.Variable init(%c) : !spirv.ptr<vector<3xi1>, Function>
+  spirv.Return
 }
 
 // CHECK-LABEL: @variable_array
-spv.func @variable_array() "None" {
+spirv.func @variable_array() "None" {
   // CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : i32
   // CHECK: llvm.alloca %[[SIZE]] x !llvm.array<10 x i32> : (i32) -> !llvm.ptr<array<10 x i32>>
-  %0 = spv.Variable : !spv.ptr<!spv.array<10 x i32>, Function>
-  spv.Return
+  %0 = spirv.Variable : !spirv.ptr<!spirv.array<10 x i32>, Function>
+  spirv.Return
 }

diff --git a/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir
index e438e03ebb37..13bde6e6fc56 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/misc-ops-to-llvm.mlir
@@ -1,75 +1,75 @@
 // RUN: mlir-opt -convert-spirv-to-llvm %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.CompositeExtract
+// spirv.CompositeExtract
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @composite_extract_array
-spv.func @composite_extract_array(%arg: !spv.array<4x!spv.array<4xf32>>) "None" {
+spirv.func @composite_extract_array(%arg: !spirv.array<4x!spirv.array<4xf32>>) "None" {
   // CHECK: llvm.extractvalue %{{.*}}[1, 3] : !llvm.array<4 x array<4 x f32>>
-  %0 = spv.CompositeExtract %arg[1 : i32, 3 : i32] : !spv.array<4x!spv.array<4xf32>>
-  spv.Return
+  %0 = spirv.CompositeExtract %arg[1 : i32, 3 : i32] : !spirv.array<4x!spirv.array<4xf32>>
+  spirv.Return
 }
 
 // CHECK-LABEL: @composite_extract_vector
-spv.func @composite_extract_vector(%arg: vector<3xf32>) "None" {
+spirv.func @composite_extract_vector(%arg: vector<3xf32>) "None" {
   // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i32) : i32
   // CHECK: llvm.extractelement %{{.*}}[%[[ZERO]] : i32] : vector<3xf32>
-  %0 = spv.CompositeExtract %arg[0 : i32] : vector<3xf32>
-  spv.Return
+  %0 = spirv.CompositeExtract %arg[0 : i32] : vector<3xf32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.CompositeInsert
+// spirv.CompositeInsert
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @composite_insert_struct
-spv.func @composite_insert_struct(%arg0: i32, %arg1: !spv.struct<(f32, !spv.array<4xi32>)>) "None" {
+spirv.func @composite_insert_struct(%arg0: i32, %arg1: !spirv.struct<(f32, !spirv.array<4xi32>)>) "None" {
   // CHECK: llvm.insertvalue %{{.*}}, %{{.*}}[1, 3] : !llvm.struct<packed (f32, array<4 x i32>)>
-  %0 = spv.CompositeInsert %arg0, %arg1[1 : i32, 3 : i32] : i32 into !spv.struct<(f32, !spv.array<4xi32>)>
-  spv.Return
+  %0 = spirv.CompositeInsert %arg0, %arg1[1 : i32, 3 : i32] : i32 into !spirv.struct<(f32, !spirv.array<4xi32>)>
+  spirv.Return
 }
 
 // CHECK-LABEL: @composite_insert_vector
-spv.func @composite_insert_vector(%arg0: vector<3xf32>, %arg1: f32) "None" {
+spirv.func @composite_insert_vector(%arg0: vector<3xf32>, %arg1: f32) "None" {
   // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i32) : i32
   // CHECK: llvm.insertelement %{{.*}}, %{{.*}}[%[[ONE]] : i32] : vector<3xf32>
-  %0 = spv.CompositeInsert %arg1, %arg0[1 : i32] : f32 into vector<3xf32>
-  spv.Return
+  %0 = spirv.CompositeInsert %arg1, %arg0[1 : i32] : f32 into vector<3xf32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.Select
+// spirv.Select
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @select_scalar
-spv.func @select_scalar(%arg0: i1, %arg1: vector<3xi32>, %arg2: f32) "None" {
+spirv.func @select_scalar(%arg0: i1, %arg1: vector<3xi32>, %arg2: f32) "None" {
   // CHECK: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : i1, vector<3xi32>
-  %0 = spv.Select %arg0, %arg1, %arg1 : i1, vector<3xi32>
+  %0 = spirv.Select %arg0, %arg1, %arg1 : i1, vector<3xi32>
   // CHECK: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : i1, f32
-  %1 = spv.Select %arg0, %arg2, %arg2 : i1, f32
-  spv.Return
+  %1 = spirv.Select %arg0, %arg2, %arg2 : i1, f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @select_vector
-spv.func @select_vector(%arg0: vector<2xi1>, %arg1: vector<2xi32>) "None" {
+spirv.func @select_vector(%arg0: vector<2xi1>, %arg1: vector<2xi32>) "None" {
   // CHECK: llvm.select %{{.*}}, %{{.*}}, %{{.*}} : vector<2xi1>, vector<2xi32>
-  %0 = spv.Select %arg0, %arg1, %arg1 : vector<2xi1>, vector<2xi32>
-  spv.Return
+  %0 = spirv.Select %arg0, %arg1, %arg1 : vector<2xi1>, vector<2xi32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.VectorShuffle
+// spirv.VectorShuffle
 //===----------------------------------------------------------------------===//
 
-spv.func @vector_shuffle_same_size(%vector1: vector<2xf32>, %vector2: vector<2xf32>) -> vector<3xf32> "None" {
+spirv.func @vector_shuffle_same_size(%vector1: vector<2xf32>, %vector2: vector<2xf32>) -> vector<3xf32> "None" {
   //      CHECK: %[[res:.*]] = llvm.shufflevector {{.*}} [0, 2, -1] : vector<2xf32>
   // CHECK-NEXT: return %[[res]] : vector<3xf32>
-  %0 = spv.VectorShuffle [0: i32, 2: i32, 0xffffffff: i32] %vector1: vector<2xf32>, %vector2: vector<2xf32> -> vector<3xf32>
-  spv.ReturnValue %0: vector<3xf32>
+  %0 = spirv.VectorShuffle [0: i32, 2: i32, 0xffffffff: i32] %vector1: vector<2xf32>, %vector2: vector<2xf32> -> vector<3xf32>
+  spirv.ReturnValue %0: vector<3xf32>
 }
 
-spv.func @vector_shuffle_different_size(%vector1: vector<3xf32>, %vector2: vector<2xf32>) -> vector<3xf32> "None" {
+spirv.func @vector_shuffle_different_size(%vector1: vector<3xf32>, %vector2: vector<2xf32>) -> vector<3xf32> "None" {
   //      CHECK: %[[UNDEF:.*]] = llvm.mlir.undef : vector<3xf32>
   // CHECK-NEXT: %[[C0_0:.*]] = llvm.mlir.constant(0 : i32) : i32
   // CHECK-NEXT: %[[C0_1:.*]] = llvm.mlir.constant(0 : i32) : i32
@@ -80,12 +80,12 @@ spv.func @vector_shuffle_different_size(%vector1: vector<3xf32>, %vector2: vecto
   // CHECK-NEXT: %[[EXT1:.*]] = llvm.extractelement {{.*}}[%[[C1_1]] : i32] : vector<2xf32>
   // CHECK-NEXT: %[[RES:.*]] = llvm.insertelement %[[EXT1]], %[[INSERT0]][%[[C1_0]] : i32] : vector<3xf32>
   // CHECK-NEXT: llvm.return %[[RES]] : vector<3xf32>
-  %0 = spv.VectorShuffle [0: i32, 4: i32, 0xffffffff: i32] %vector1: vector<3xf32>, %vector2: vector<2xf32> -> vector<3xf32>
-  spv.ReturnValue %0: vector<3xf32>
+  %0 = spirv.VectorShuffle [0: i32, 4: i32, 0xffffffff: i32] %vector1: vector<3xf32>, %vector2: vector<2xf32> -> vector<3xf32>
+  spirv.ReturnValue %0: vector<3xf32>
 }
 
 //===----------------------------------------------------------------------===//
-// spv.EntryPoint and spv.ExecutionMode
+// spirv.EntryPoint and spirv.ExecutionMode
 //===----------------------------------------------------------------------===//
 
 //      CHECK: module {
@@ -99,12 +99,12 @@ spv.func @vector_shuffle_different_size(%vector1: vector<3xf32>, %vector2: vecto
 // CHECK-NEXT:     llvm.return
 // CHECK-NEXT:   }
 // CHECK-NEXT: }
-spv.module Logical OpenCL {
-  spv.func @empty() "None" {
-    spv.Return
+spirv.module Logical OpenCL {
+  spirv.func @empty() "None" {
+    spirv.Return
   }
-  spv.EntryPoint "Kernel" @empty
-  spv.ExecutionMode @empty "ContractionOff"
+  spirv.EntryPoint "Kernel" @empty
+  spirv.ExecutionMode @empty "ContractionOff"
 }
 
 //      CHECK: module {
@@ -125,29 +125,29 @@ spv.module Logical OpenCL {
 // CHECK-NEXT:     llvm.return
 // CHECK-NEXT:   }
 // CHECK-NEXT: }
-spv.module Logical OpenCL {
-  spv.func @bar() "None" {
-    spv.Return
+spirv.module Logical OpenCL {
+  spirv.func @bar() "None" {
+    spirv.Return
   }
-  spv.EntryPoint "Kernel" @bar
-  spv.ExecutionMode @bar "ContractionOff"
-  spv.ExecutionMode @bar "LocalSizeHint", 32, 1, 1
+  spirv.EntryPoint "Kernel" @bar
+  spirv.ExecutionMode @bar "ContractionOff"
+  spirv.ExecutionMode @bar "LocalSizeHint", 32, 1, 1
 }
 
 //===----------------------------------------------------------------------===//
-// spv.Undef
+// spirv.Undef
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @undef_scalar
-spv.func @undef_scalar() "None" {
+spirv.func @undef_scalar() "None" {
   // CHECK: llvm.mlir.undef : f32
-  %0 = spv.Undef : f32
-  spv.Return
+  %0 = spirv.Undef : f32
+  spirv.Return
 }
 
 // CHECK-LABEL: @undef_vector
-spv.func @undef_vector() "None" {
+spirv.func @undef_vector() "None" {
   // CHECK: llvm.mlir.undef : vector<2xi32>
-  %0 = spv.Undef : vector<2xi32>
-  spv.Return
+  %0 = spirv.Undef : vector<2xi32>
+  spirv.Return
 }

diff --git a/mlir/test/Conversion/SPIRVToLLVM/module-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/module-ops-to-llvm.mlir
index c75214920b5a..894de8b9e90a 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/module-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/module-ops-to-llvm.mlir
@@ -1,23 +1,23 @@
 // RUN: mlir-opt -convert-spirv-to-llvm %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.module
+// spirv.module
 //===----------------------------------------------------------------------===//
 
 // CHECK: module
-spv.module Logical GLSL450 {}
+spirv.module Logical GLSL450 {}
 
 // CHECK: module @foo
-spv.module @foo Logical GLSL450 {}
+spirv.module @foo Logical GLSL450 {}
 
 // CHECK: module
-spv.module Logical GLSL450 requires #spv.vce<v1.0, [Shader], [SPV_KHR_16bit_storage]> {}
+spirv.module Logical GLSL450 requires #spirv.vce<v1.0, [Shader], [SPV_KHR_16bit_storage]> {}
 
 // CHECK: module
-spv.module Logical GLSL450 {
+spirv.module Logical GLSL450 {
 	// CHECK-LABEL: llvm.func @empty()
-  spv.func @empty() -> () "None" {
+  spirv.func @empty() -> () "None" {
 		// CHECK: llvm.return
-    spv.Return
+    spirv.Return
   }
 }

diff --git a/mlir/test/Conversion/SPIRVToLLVM/shift-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/shift-ops-to-llvm.mlir
index 9b76409dbd47..da4def7aeda8 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/shift-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/shift-ops-to-llvm.mlir
@@ -1,121 +1,121 @@
 // RUN: mlir-opt -convert-spirv-to-llvm %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.ShiftRightArithmetic
+// spirv.ShiftRightArithmetic
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @shift_right_arithmetic_scalar
-spv.func @shift_right_arithmetic_scalar(%arg0: i32, %arg1: si32, %arg2 : i16, %arg3 : ui16) "None" {
+spirv.func @shift_right_arithmetic_scalar(%arg0: i32, %arg1: si32, %arg2 : i16, %arg3 : ui16) "None" {
   // CHECK: llvm.ashr %{{.*}}, %{{.*}} : i32
-  %0 = spv.ShiftRightArithmetic %arg0, %arg0 : i32, i32
+  %0 = spirv.ShiftRightArithmetic %arg0, %arg0 : i32, i32
 
   // CHECK: llvm.ashr %{{.*}}, %{{.*}} : i32
-  %1 = spv.ShiftRightArithmetic %arg0, %arg1 : i32, si32
+  %1 = spirv.ShiftRightArithmetic %arg0, %arg1 : i32, si32
 
   // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : i16 to i32
   // CHECK: llvm.ashr %{{.*}}, %[[SEXT]] : i32
-  %2 = spv.ShiftRightArithmetic %arg0, %arg2 : i32, i16
+  %2 = spirv.ShiftRightArithmetic %arg0, %arg2 : i32, i16
 
   // CHECK: %[[ZEXT:.*]] = llvm.zext %{{.*}} : i16 to i32
   // CHECK: llvm.ashr %{{.*}}, %[[ZEXT]] : i32
-  %3 = spv.ShiftRightArithmetic %arg0, %arg3 : i32, ui16
-  spv.Return
+  %3 = spirv.ShiftRightArithmetic %arg0, %arg3 : i32, ui16
+  spirv.Return
 }
 
 // CHECK-LABEL: @shift_right_arithmetic_vector
-spv.func @shift_right_arithmetic_vector(%arg0: vector<4xi64>, %arg1: vector<4xui64>, %arg2: vector<4xi32>, %arg3: vector<4xui32>) "None" {
+spirv.func @shift_right_arithmetic_vector(%arg0: vector<4xi64>, %arg1: vector<4xui64>, %arg2: vector<4xi32>, %arg3: vector<4xui32>) "None" {
   // CHECK: llvm.ashr %{{.*}}, %{{.*}} : vector<4xi64>
-  %0 = spv.ShiftRightArithmetic %arg0, %arg0 : vector<4xi64>, vector<4xi64>
+  %0 = spirv.ShiftRightArithmetic %arg0, %arg0 : vector<4xi64>, vector<4xi64>
 
   // CHECK: llvm.ashr %{{.*}}, %{{.*}} : vector<4xi64>
-  %1 = spv.ShiftRightArithmetic %arg0, %arg1 : vector<4xi64>, vector<4xui64>
+  %1 = spirv.ShiftRightArithmetic %arg0, %arg1 : vector<4xi64>, vector<4xui64>
 
   // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : vector<4xi32> to vector<4xi64>
   // CHECK: llvm.ashr %{{.*}}, %[[SEXT]] : vector<4xi64>
-  %2 = spv.ShiftRightArithmetic %arg0, %arg2 : vector<4xi64>,  vector<4xi32>
+  %2 = spirv.ShiftRightArithmetic %arg0, %arg2 : vector<4xi64>,  vector<4xi32>
 
   // CHECK: %[[ZEXT:.*]] = llvm.zext %{{.*}} : vector<4xi32> to vector<4xi64>
   // CHECK: llvm.ashr %{{.*}}, %[[ZEXT]] : vector<4xi64>
-  %3 = spv.ShiftRightArithmetic %arg0, %arg3 : vector<4xi64>, vector<4xui32>
-  spv.Return
+  %3 = spirv.ShiftRightArithmetic %arg0, %arg3 : vector<4xi64>, vector<4xui32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ShiftRightLogical
+// spirv.ShiftRightLogical
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @shift_right_logical_scalar
-spv.func @shift_right_logical_scalar(%arg0: i32, %arg1: si32, %arg2 : si16, %arg3 : ui16) "None" {
+spirv.func @shift_right_logical_scalar(%arg0: i32, %arg1: si32, %arg2 : si16, %arg3 : ui16) "None" {
   // CHECK: llvm.lshr %{{.*}}, %{{.*}} : i32
-  %0 = spv.ShiftRightLogical %arg0, %arg0 : i32, i32
+  %0 = spirv.ShiftRightLogical %arg0, %arg0 : i32, i32
 
   // CHECK: llvm.lshr %{{.*}}, %{{.*}} : i32
-  %1 = spv.ShiftRightLogical %arg0, %arg1 : i32, si32
+  %1 = spirv.ShiftRightLogical %arg0, %arg1 : i32, si32
 
   // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : i16 to i32
   // CHECK: llvm.lshr %{{.*}}, %[[SEXT]] : i32
-  %2 = spv.ShiftRightLogical %arg0, %arg2 : i32, si16
+  %2 = spirv.ShiftRightLogical %arg0, %arg2 : i32, si16
 
   // CHECK: %[[ZEXT:.*]] = llvm.zext %{{.*}} : i16 to i32
   // CHECK: llvm.lshr %{{.*}}, %[[ZEXT]] : i32
-  %3 = spv.ShiftRightLogical %arg0, %arg3 : i32, ui16
-  spv.Return
+  %3 = spirv.ShiftRightLogical %arg0, %arg3 : i32, ui16
+  spirv.Return
 }
 
 // CHECK-LABEL: @shift_right_logical_vector
-spv.func @shift_right_logical_vector(%arg0: vector<4xi64>, %arg1: vector<4xsi64>, %arg2: vector<4xi32>, %arg3: vector<4xui32>) "None" {
+spirv.func @shift_right_logical_vector(%arg0: vector<4xi64>, %arg1: vector<4xsi64>, %arg2: vector<4xi32>, %arg3: vector<4xui32>) "None" {
   // CHECK: llvm.lshr %{{.*}}, %{{.*}} : vector<4xi64>
-  %0 = spv.ShiftRightLogical %arg0, %arg0 : vector<4xi64>, vector<4xi64>
+  %0 = spirv.ShiftRightLogical %arg0, %arg0 : vector<4xi64>, vector<4xi64>
 
   // CHECK: llvm.lshr %{{.*}}, %{{.*}} : vector<4xi64>
-  %1 = spv.ShiftRightLogical %arg0, %arg1 : vector<4xi64>, vector<4xsi64>
+  %1 = spirv.ShiftRightLogical %arg0, %arg1 : vector<4xi64>, vector<4xsi64>
 
   // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : vector<4xi32> to vector<4xi64>
   // CHECK: llvm.lshr %{{.*}}, %[[SEXT]] : vector<4xi64>
-  %2 = spv.ShiftRightLogical %arg0, %arg2 : vector<4xi64>,  vector<4xi32>
+  %2 = spirv.ShiftRightLogical %arg0, %arg2 : vector<4xi64>,  vector<4xi32>
 
   // CHECK: %[[ZEXT:.*]] = llvm.zext %{{.*}} : vector<4xi32> to vector<4xi64>
   // CHECK: llvm.lshr %{{.*}}, %[[ZEXT]] : vector<4xi64>
-  %3 = spv.ShiftRightLogical %arg0, %arg3 : vector<4xi64>, vector<4xui32>
-  spv.Return
+  %3 = spirv.ShiftRightLogical %arg0, %arg3 : vector<4xi64>, vector<4xui32>
+  spirv.Return
 }
 
 //===----------------------------------------------------------------------===//
-// spv.ShiftLeftLogical
+// spirv.ShiftLeftLogical
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @shift_left_logical_scalar
-spv.func @shift_left_logical_scalar(%arg0: i32, %arg1: si32, %arg2 : i16, %arg3 : ui16) "None" {
+spirv.func @shift_left_logical_scalar(%arg0: i32, %arg1: si32, %arg2 : i16, %arg3 : ui16) "None" {
   // CHECK: llvm.shl %{{.*}}, %{{.*}} : i32
-  %0 = spv.ShiftLeftLogical %arg0, %arg0 : i32, i32
+  %0 = spirv.ShiftLeftLogical %arg0, %arg0 : i32, i32
 
   // CHECK: llvm.shl %{{.*}}, %{{.*}} : i32
-  %1 = spv.ShiftLeftLogical %arg0, %arg1 : i32, si32
+  %1 = spirv.ShiftLeftLogical %arg0, %arg1 : i32, si32
 
   // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : i16 to i32
   // CHECK: llvm.shl %{{.*}}, %[[SEXT]] : i32
-  %2 = spv.ShiftLeftLogical %arg0, %arg2 : i32, i16
+  %2 = spirv.ShiftLeftLogical %arg0, %arg2 : i32, i16
 
   // CHECK: %[[ZEXT:.*]] = llvm.zext %{{.*}} : i16 to i32
   // CHECK: llvm.shl %{{.*}}, %[[ZEXT]] : i32
-  %3 = spv.ShiftLeftLogical %arg0, %arg3 : i32, ui16
-  spv.Return
+  %3 = spirv.ShiftLeftLogical %arg0, %arg3 : i32, ui16
+  spirv.Return
 }
 
 // CHECK-LABEL: @shift_left_logical_vector
-spv.func @shift_left_logical_vector(%arg0: vector<4xi64>, %arg1: vector<4xsi64>, %arg2: vector<4xi32>, %arg3: vector<4xui32>) "None" {
+spirv.func @shift_left_logical_vector(%arg0: vector<4xi64>, %arg1: vector<4xsi64>, %arg2: vector<4xi32>, %arg3: vector<4xui32>) "None" {
   // CHECK: llvm.shl %{{.*}}, %{{.*}} : vector<4xi64>
-  %0 = spv.ShiftLeftLogical %arg0, %arg0 : vector<4xi64>, vector<4xi64>
+  %0 = spirv.ShiftLeftLogical %arg0, %arg0 : vector<4xi64>, vector<4xi64>
 
   // CHECK: llvm.shl %{{.*}}, %{{.*}} : vector<4xi64>
-  %1 = spv.ShiftLeftLogical %arg0, %arg1 : vector<4xi64>, vector<4xsi64>
+  %1 = spirv.ShiftLeftLogical %arg0, %arg1 : vector<4xi64>, vector<4xsi64>
 
   // CHECK: %[[SEXT:.*]] = llvm.sext %{{.*}} : vector<4xi32> to vector<4xi64>
   // CHECK: llvm.shl %{{.*}}, %[[SEXT]] : vector<4xi64>
-  %2 = spv.ShiftLeftLogical %arg0, %arg2 : vector<4xi64>,  vector<4xi32>
+  %2 = spirv.ShiftLeftLogical %arg0, %arg2 : vector<4xi64>,  vector<4xi32>
 
   // CHECK: %[[ZEXT:.*]] = llvm.zext %{{.*}} : vector<4xi32> to vector<4xi64>
   // CHECK: llvm.shl %{{.*}}, %[[ZEXT]] : vector<4xi64>
-  %3 = spv.ShiftLeftLogical %arg0, %arg3 : vector<4xi64>, vector<4xui32>
-  spv.Return
+  %3 = spirv.ShiftLeftLogical %arg0, %arg3 : vector<4xi64>, vector<4xui32>
+  spirv.Return
 }

diff --git a/mlir/test/Conversion/SPIRVToLLVM/spirv-types-to-llvm-invalid.mlir b/mlir/test/Conversion/SPIRVToLLVM/spirv-types-to-llvm-invalid.mlir
index 20207456387b..084652aff958 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/spirv-types-to-llvm-invalid.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/spirv-types-to-llvm-invalid.mlir
@@ -1,20 +1,20 @@
 // RUN: mlir-opt %s -convert-spirv-to-llvm -verify-diagnostics -split-input-file
 
-// expected-error at +1 {{failed to legalize operation 'spv.func' that was explicitly marked illegal}}
-spv.func @array_with_unnatural_stride(%arg: !spv.array<4 x f32, stride=8>) -> () "None" {
-  spv.Return
+// expected-error at +1 {{failed to legalize operation 'spirv.func' that was explicitly marked illegal}}
+spirv.func @array_with_unnatural_stride(%arg: !spirv.array<4 x f32, stride=8>) -> () "None" {
+  spirv.Return
 }
 
 // -----
 
-// expected-error at +1 {{failed to legalize operation 'spv.func' that was explicitly marked illegal}}
-spv.func @struct_with_unnatural_offset(%arg: !spv.struct<(i32[0], i32[8])>) -> () "None" {
-  spv.Return
+// expected-error at +1 {{failed to legalize operation 'spirv.func' that was explicitly marked illegal}}
+spirv.func @struct_with_unnatural_offset(%arg: !spirv.struct<(i32[0], i32[8])>) -> () "None" {
+  spirv.Return
 }
 
 // -----
 
-// expected-error at +1 {{failed to legalize operation 'spv.func' that was explicitly marked illegal}}
-spv.func @struct_with_decorations(%arg: !spv.struct<(f32 [RelaxedPrecision])>) -> () "None" {
-  spv.Return
+// expected-error at +1 {{failed to legalize operation 'spirv.func' that was explicitly marked illegal}}
+spirv.func @struct_with_decorations(%arg: !spirv.struct<(f32 [RelaxedPrecision])>) -> () "None" {
+  spirv.Return
 }

diff  --git a/mlir/test/Conversion/SPIRVToLLVM/spirv-types-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/spirv-types-to-llvm.mlir
index 1d573f3f2505..39038ad47f21 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/spirv-types-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/spirv-types-to-llvm.mlir
@@ -5,40 +5,40 @@
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @array(!llvm.array<16 x f32>, !llvm.array<32 x vector<4xf32>>)
-spv.func @array(!spv.array<16 x f32>, !spv.array< 32 x vector<4xf32> >) "None"
+spirv.func @array(!spirv.array<16 x f32>, !spirv.array< 32 x vector<4xf32> >) "None"
 
 // CHECK-LABEL: @array_with_natural_stride(!llvm.array<16 x f32>)
-spv.func @array_with_natural_stride(!spv.array<16 x f32, stride=4>) "None"
+spirv.func @array_with_natural_stride(!spirv.array<16 x f32, stride=4>) "None"
 
 //===----------------------------------------------------------------------===//
 // Pointer type
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @pointer_scalar(!llvm.ptr<i1>, !llvm.ptr<f32>)
-spv.func @pointer_scalar(!spv.ptr<i1, Uniform>, !spv.ptr<f32, Private>) "None"
+spirv.func @pointer_scalar(!spirv.ptr<i1, Uniform>, !spirv.ptr<f32, Private>) "None"
 
 // CHECK-LABEL: @pointer_vector(!llvm.ptr<vector<4xi32>>)
-spv.func @pointer_vector(!spv.ptr<vector<4xi32>, Function>) "None"
+spirv.func @pointer_vector(!spirv.ptr<vector<4xi32>, Function>) "None"
 
 //===----------------------------------------------------------------------===//
 // Runtime array type
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @runtime_array_vector(!llvm.array<0 x vector<4xf32>>)
-spv.func @runtime_array_vector(!spv.rtarray< vector<4xf32> >) "None"
+spirv.func @runtime_array_vector(!spirv.rtarray< vector<4xf32> >) "None"
 
 // CHECK-LABEL: @runtime_array_scalar(!llvm.array<0 x f32>)
-spv.func @runtime_array_scalar(!spv.rtarray<f32>) "None"
+spirv.func @runtime_array_scalar(!spirv.rtarray<f32>) "None"
 
 //===----------------------------------------------------------------------===//
 // Struct type
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @struct(!llvm.struct<packed (f64)>)
-spv.func @struct(!spv.struct<(f64)>) "None"
+spirv.func @struct(!spirv.struct<(f64)>) "None"
 
 // CHECK-LABEL: @struct_nested(!llvm.struct<packed (i32, struct<packed (i64, i32)>)>)
-spv.func @struct_nested(!spv.struct<(i32, !spv.struct<(i64, i32)>)>) "None"
+spirv.func @struct_nested(!spirv.struct<(i32, !spirv.struct<(i64, i32)>)>) "None"
 
 // CHECK-LABEL: @struct_with_natural_offset(!llvm.struct<(i8, i32)>)
-spv.func @struct_with_natural_offset(!spv.struct<(i8[0], i32[4])>) "None"
+spirv.func @struct_with_natural_offset(!spirv.struct<(i8[0], i32[4])>) "None"

diff  --git a/mlir/test/Conversion/TensorToSPIRV/tensor-ops-to-spirv.mlir b/mlir/test/Conversion/TensorToSPIRV/tensor-ops-to-spirv.mlir
index 1ad8df1f93e4..904560d0572b 100644
--- a/mlir/test/Conversion/TensorToSPIRV/tensor-ops-to-spirv.mlir
+++ b/mlir/test/Conversion/TensorToSPIRV/tensor-ops-to-spirv.mlir
@@ -7,23 +7,23 @@
 // CHECK-LABEL: func @tensor_extract_constant
 // CHECK-SAME: (%[[A:.+]]: i32, %[[B:.+]]: i32, %[[C:.+]]: i32)
 func.func @tensor_extract_constant(%a : index, %b: index, %c: index) -> i32 {
-  // CHECK: %[[CST:.+]] = spv.Constant dense<[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]>
+  // CHECK: %[[CST:.+]] = spirv.Constant dense<[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]>
   %cst = arith.constant dense<[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]> : tensor<2x2x3xi32>
-  // CHECK: %[[VAR:.+]] = spv.Variable : !spv.ptr<!spv.array<12 x i32>, Function>
-  // CHECK: spv.Store "Function" %[[VAR]], %[[CST]] : !spv.array<12 x i32>
-  // CHECK: %[[C0:.+]] = spv.Constant 0 : i32
-  // CHECK: %[[C6:.+]] = spv.Constant 6 : i32
-  // CHECK: %[[MUL0:.+]] = spv.IMul %[[C6]], %[[A]] : i32
-  // CHECK: %[[ADD0:.+]] = spv.IAdd %[[C0]], %[[MUL0]] : i32
-  // CHECK: %[[C3:.+]] = spv.Constant 3 : i32
-  // CHECK: %[[MUL1:.+]] = spv.IMul %[[C3]], %[[B]] : i32
-  // CHECK: %[[ADD1:.+]] = spv.IAdd %[[ADD0]], %[[MUL1]] : i32
-  // CHECK: %[[C1:.+]] = spv.Constant 1 : i32
-  // CHECK: %[[MUL2:.+]] = spv.IMul %[[C1]], %[[C]] : i32
-  // CHECK: %[[ADD2:.+]] = spv.IAdd %[[ADD1]], %[[MUL2]] : i32
-  // CHECK: %[[AC:.+]] = spv.AccessChain %[[VAR]][%[[ADD2]]]
-  // CHECK: %[[VAL:.+]] = spv.Load "Function" %[[AC]] : i32
+  // CHECK: %[[VAR:.+]] = spirv.Variable : !spirv.ptr<!spirv.array<12 x i32>, Function>
+  // CHECK: spirv.Store "Function" %[[VAR]], %[[CST]] : !spirv.array<12 x i32>
+  // CHECK: %[[C0:.+]] = spirv.Constant 0 : i32
+  // CHECK: %[[C6:.+]] = spirv.Constant 6 : i32
+  // CHECK: %[[MUL0:.+]] = spirv.IMul %[[C6]], %[[A]] : i32
+  // CHECK: %[[ADD0:.+]] = spirv.IAdd %[[C0]], %[[MUL0]] : i32
+  // CHECK: %[[C3:.+]] = spirv.Constant 3 : i32
+  // CHECK: %[[MUL1:.+]] = spirv.IMul %[[C3]], %[[B]] : i32
+  // CHECK: %[[ADD1:.+]] = spirv.IAdd %[[ADD0]], %[[MUL1]] : i32
+  // CHECK: %[[C1:.+]] = spirv.Constant 1 : i32
+  // CHECK: %[[MUL2:.+]] = spirv.IMul %[[C1]], %[[C]] : i32
+  // CHECK: %[[ADD2:.+]] = spirv.IAdd %[[ADD1]], %[[MUL2]] : i32
+  // CHECK: %[[AC:.+]] = spirv.AccessChain %[[VAR]][%[[ADD2]]]
+  // CHECK: %[[VAL:.+]] = spirv.Load "Function" %[[AC]] : i32
   %extract = tensor.extract %cst[%a, %b, %c] : tensor<2x2x3xi32>
-  // CHECK: spv.ReturnValue %[[VAL]]
+  // CHECK: spirv.ReturnValue %[[VAL]]
   return %extract : i32
 }

diff  --git a/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir b/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir
index afce0493ebb7..a1f05e9a620c 100644
--- a/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir
+++ b/mlir/test/Conversion/VectorToSPIRV/vector-to-spirv.mlir
@@ -1,11 +1,11 @@
 // RUN: mlir-opt -split-input-file -convert-vector-to-spirv -verify-diagnostics %s -o - | FileCheck %s
 
-module attributes { spv.target_env = #spv.target_env<#spv.vce<v1.0, [Float16], []>, #spv.resource_limits<>> } {
+module attributes { spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Float16], []>, #spirv.resource_limits<>> } {
 
 // CHECK-LABEL: @bitcast
 //  CHECK-SAME: %[[ARG0:.+]]: vector<2xf32>, %[[ARG1:.+]]: vector<2xf16>
-//       CHECK:   spv.Bitcast %[[ARG0]] : vector<2xf32> to vector<4xf16>
-//       CHECK:   spv.Bitcast %[[ARG1]] : vector<2xf16> to f32
+//       CHECK:   spirv.Bitcast %[[ARG0]] : vector<2xf32> to vector<4xf16>
+//       CHECK:   spirv.Bitcast %[[ARG1]] : vector<2xf16> to f32
 func.func @bitcast(%arg0 : vector<2xf32>, %arg1: vector<2xf16>) -> (vector<4xf16>, vector<1xf32>) {
   %0 = vector.bitcast %arg0 : vector<2xf32> to vector<4xf16>
   %1 = vector.bitcast %arg1 : vector<2xf16> to vector<1xf32>
@@ -16,18 +16,18 @@ func.func @bitcast(%arg0 : vector<2xf32>, %arg1: vector<2xf16>) -> (vector<4xf16
 
 // -----
 
-module attributes { spv.target_env = #spv.target_env<#spv.vce<v1.0, [Kernel], []>, #spv.resource_limits<>> } {
+module attributes { spirv.target_env = #spirv.target_env<#spirv.vce<v1.0, [Kernel], []>, #spirv.resource_limits<>> } {
 
 // CHECK-LABEL: @cl_fma
 //  CHECK-SAME: %[[A:.*]]: vector<4xf32>, %[[B:.*]]: vector<4xf32>, %[[C:.*]]: vector<4xf32>
-//       CHECK:   spv.CL.fma %[[A]], %[[B]], %[[C]] : vector<4xf32>
+//       CHECK:   spirv.CL.fma %[[A]], %[[B]], %[[C]] : vector<4xf32>
 func.func @cl_fma(%a: vector<4xf32>, %b: vector<4xf32>, %c: vector<4xf32>) -> vector<4xf32> {
   %0 = vector.fma %a, %b, %c: vector<4xf32>
   return %0 : vector<4xf32>
 }
 
 // CHECK-LABEL: @cl_fma_size1_vector
-//       CHECK:   spv.CL.fma %{{.+}} : f32
+//       CHECK:   spirv.CL.fma %{{.+}} : f32
 func.func @cl_fma_size1_vector(%a: vector<1xf32>, %b: vector<1xf32>, %c: vector<1xf32>) -> vector<1xf32> {
   %0 = vector.fma %a, %b, %c: vector<1xf32>
   return %0 : vector<1xf32>
@@ -35,12 +35,12 @@ func.func @cl_fma_size1_vector(%a: vector<1xf32>, %b: vector<1xf32>, %c: vector<
 
 // CHECK-LABEL: func @cl_reduction_maxf
 //  CHECK-SAME: (%[[V:.+]]: vector<3xf32>, %[[S:.+]]: f32)
-//       CHECK:   %[[S0:.+]] = spv.CompositeExtract %[[V]][0 : i32] : vector<3xf32>
-//       CHECK:   %[[S1:.+]] = spv.CompositeExtract %[[V]][1 : i32] : vector<3xf32>
-//       CHECK:   %[[S2:.+]] = spv.CompositeExtract %[[V]][2 : i32] : vector<3xf32>
-//       CHECK:   %[[MAX0:.+]] = spv.CL.fmax %[[S0]], %[[S1]]
-//       CHECK:   %[[MAX1:.+]] = spv.CL.fmax %[[MAX0]], %[[S2]]
-//       CHECK:   %[[MAX2:.+]] = spv.CL.fmax %[[MAX1]], %[[S]]
+//       CHECK:   %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xf32>
+//       CHECK:   %[[S1:.+]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xf32>
+//       CHECK:   %[[S2:.+]] = spirv.CompositeExtract %[[V]][2 : i32] : vector<3xf32>
+//       CHECK:   %[[MAX0:.+]] = spirv.CL.fmax %[[S0]], %[[S1]]
+//       CHECK:   %[[MAX1:.+]] = spirv.CL.fmax %[[MAX0]], %[[S2]]
+//       CHECK:   %[[MAX2:.+]] = spirv.CL.fmax %[[MAX1]], %[[S]]
 //       CHECK:   return %[[MAX2]]
 func.func @cl_reduction_maxf(%v : vector<3xf32>, %s: f32) -> f32 {
   %reduce = vector.reduction <maxf>, %v, %s : vector<3xf32> into f32
@@ -49,12 +49,12 @@ func.func @cl_reduction_maxf(%v : vector<3xf32>, %s: f32) -> f32 {
 
 // CHECK-LABEL: func @cl_reduction_minf
 //  CHECK-SAME: (%[[V:.+]]: vector<3xf32>, %[[S:.+]]: f32)
-//       CHECK:   %[[S0:.+]] = spv.CompositeExtract %[[V]][0 : i32] : vector<3xf32>
-//       CHECK:   %[[S1:.+]] = spv.CompositeExtract %[[V]][1 : i32] : vector<3xf32>
-//       CHECK:   %[[S2:.+]] = spv.CompositeExtract %[[V]][2 : i32] : vector<3xf32>
-//       CHECK:   %[[MIN0:.+]] = spv.CL.fmin %[[S0]], %[[S1]]
-//       CHECK:   %[[MIN1:.+]] = spv.CL.fmin %[[MIN0]], %[[S2]]
-//       CHECK:   %[[MIN2:.+]] = spv.CL.fmin %[[MIN1]], %[[S]]
+//       CHECK:   %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xf32>
+//       CHECK:   %[[S1:.+]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xf32>
+//       CHECK:   %[[S2:.+]] = spirv.CompositeExtract %[[V]][2 : i32] : vector<3xf32>
+//       CHECK:   %[[MIN0:.+]] = spirv.CL.fmin %[[S0]], %[[S1]]
+//       CHECK:   %[[MIN1:.+]] = spirv.CL.fmin %[[MIN0]], %[[S2]]
+//       CHECK:   %[[MIN2:.+]] = spirv.CL.fmin %[[MIN1]], %[[S]]
 //       CHECK:   return %[[MIN2]]
 func.func @cl_reduction_minf(%v : vector<3xf32>, %s: f32) -> f32 {
   %reduce = vector.reduction <minf>, %v, %s : vector<3xf32> into f32
@@ -63,12 +63,12 @@ func.func @cl_reduction_minf(%v : vector<3xf32>, %s: f32) -> f32 {
 
 // CHECK-LABEL: func @cl_reduction_maxsi
 //  CHECK-SAME: (%[[V:.+]]: vector<3xi32>, %[[S:.+]]: i32)
-//       CHECK:   %[[S0:.+]] = spv.CompositeExtract %[[V]][0 : i32] : vector<3xi32>
-//       CHECK:   %[[S1:.+]] = spv.CompositeExtract %[[V]][1 : i32] : vector<3xi32>
-//       CHECK:   %[[S2:.+]] = spv.CompositeExtract %[[V]][2 : i32] : vector<3xi32>
-//       CHECK:   %[[MAX0:.+]] = spv.CL.s_max %[[S0]], %[[S1]]
-//       CHECK:   %[[MAX1:.+]] = spv.CL.s_max %[[MAX0]], %[[S2]]
-//       CHECK:   %[[MAX2:.+]] = spv.CL.s_max %[[MAX1]], %[[S]]
+//       CHECK:   %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xi32>
+//       CHECK:   %[[S1:.+]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xi32>
+//       CHECK:   %[[S2:.+]] = spirv.CompositeExtract %[[V]][2 : i32] : vector<3xi32>
+//       CHECK:   %[[MAX0:.+]] = spirv.CL.s_max %[[S0]], %[[S1]]
+//       CHECK:   %[[MAX1:.+]] = spirv.CL.s_max %[[MAX0]], %[[S2]]
+//       CHECK:   %[[MAX2:.+]] = spirv.CL.s_max %[[MAX1]], %[[S]]
 //       CHECK:   return %[[MAX2]]
 func.func @cl_reduction_maxsi(%v : vector<3xi32>, %s: i32) -> i32 {
   %reduce = vector.reduction <maxsi>, %v, %s : vector<3xi32> into i32
@@ -77,12 +77,12 @@ func.func @cl_reduction_maxsi(%v : vector<3xi32>, %s: i32) -> i32 {
 
 // CHECK-LABEL: func @cl_reduction_minsi
 //  CHECK-SAME: (%[[V:.+]]: vector<3xi32>, %[[S:.+]]: i32)
-//       CHECK:   %[[S0:.+]] = spv.CompositeExtract %[[V]][0 : i32] : vector<3xi32>
-//       CHECK:   %[[S1:.+]] = spv.CompositeExtract %[[V]][1 : i32] : vector<3xi32>
-//       CHECK:   %[[S2:.+]] = spv.CompositeExtract %[[V]][2 : i32] : vector<3xi32>
-//       CHECK:   %[[MIN0:.+]] = spv.CL.s_min %[[S0]], %[[S1]]
-//       CHECK:   %[[MIN1:.+]] = spv.CL.s_min %[[MIN0]], %[[S2]]
-//       CHECK:   %[[MIN2:.+]] = spv.CL.s_min %[[MIN1]], %[[S]]
+//       CHECK:   %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xi32>
+//       CHECK:   %[[S1:.+]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xi32>
+//       CHECK:   %[[S2:.+]] = spirv.CompositeExtract %[[V]][2 : i32] : vector<3xi32>
+//       CHECK:   %[[MIN0:.+]] = spirv.CL.s_min %[[S0]], %[[S1]]
+//       CHECK:   %[[MIN1:.+]] = spirv.CL.s_min %[[MIN0]], %[[S2]]
+//       CHECK:   %[[MIN2:.+]] = spirv.CL.s_min %[[MIN1]], %[[S]]
 //       CHECK:   return %[[MIN2]]
 func.func @cl_reduction_minsi(%v : vector<3xi32>, %s: i32) -> i32 {
   %reduce = vector.reduction <minsi>, %v, %s : vector<3xi32> into i32
@@ -91,12 +91,12 @@ func.func @cl_reduction_minsi(%v : vector<3xi32>, %s: i32) -> i32 {
 
 // CHECK-LABEL: func @cl_reduction_maxui
 //  CHECK-SAME: (%[[V:.+]]: vector<3xi32>, %[[S:.+]]: i32)
-//       CHECK:   %[[S0:.+]] = spv.CompositeExtract %[[V]][0 : i32] : vector<3xi32>
-//       CHECK:   %[[S1:.+]] = spv.CompositeExtract %[[V]][1 : i32] : vector<3xi32>
-//       CHECK:   %[[S2:.+]] = spv.CompositeExtract %[[V]][2 : i32] : vector<3xi32>
-//       CHECK:   %[[MAX0:.+]] = spv.CL.u_max %[[S0]], %[[S1]]
-//       CHECK:   %[[MAX1:.+]] = spv.CL.u_max %[[MAX0]], %[[S2]]
-//       CHECK:   %[[MAX2:.+]] = spv.CL.u_max %[[MAX1]], %[[S]]
+//       CHECK:   %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xi32>
+//       CHECK:   %[[S1:.+]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xi32>
+//       CHECK:   %[[S2:.+]] = spirv.CompositeExtract %[[V]][2 : i32] : vector<3xi32>
+//       CHECK:   %[[MAX0:.+]] = spirv.CL.u_max %[[S0]], %[[S1]]
+//       CHECK:   %[[MAX1:.+]] = spirv.CL.u_max %[[MAX0]], %[[S2]]
+//       CHECK:   %[[MAX2:.+]] = spirv.CL.u_max %[[MAX1]], %[[S]]
 //       CHECK:   return %[[MAX2]]
 func.func @cl_reduction_maxui(%v : vector<3xi32>, %s: i32) -> i32 {
   %reduce = vector.reduction <maxui>, %v, %s : vector<3xi32> into i32
@@ -105,12 +105,12 @@ func.func @cl_reduction_maxui(%v : vector<3xi32>, %s: i32) -> i32 {
 
 // CHECK-LABEL: func @cl_reduction_minui
 //  CHECK-SAME: (%[[V:.+]]: vector<3xi32>, %[[S:.+]]: i32)
-//       CHECK:   %[[S0:.+]] = spv.CompositeExtract %[[V]][0 : i32] : vector<3xi32>
-//       CHECK:   %[[S1:.+]] = spv.CompositeExtract %[[V]][1 : i32] : vector<3xi32>
-//       CHECK:   %[[S2:.+]] = spv.CompositeExtract %[[V]][2 : i32] : vector<3xi32>
-//       CHECK:   %[[MIN0:.+]] = spv.CL.u_min %[[S0]], %[[S1]]
-//       CHECK:   %[[MIN1:.+]] = spv.CL.u_min %[[MIN0]], %[[S2]]
-//       CHECK:   %[[MIN2:.+]] = spv.CL.u_min %[[MIN1]], %[[S]]
+//       CHECK:   %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xi32>
+//       CHECK:   %[[S1:.+]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xi32>
+//       CHECK:   %[[S2:.+]] = spirv.CompositeExtract %[[V]][2 : i32] : vector<3xi32>
+//       CHECK:   %[[MIN0:.+]] = spirv.CL.u_min %[[S0]], %[[S1]]
+//       CHECK:   %[[MIN1:.+]] = spirv.CL.u_min %[[MIN0]], %[[S2]]
+//       CHECK:   %[[MIN2:.+]] = spirv.CL.u_min %[[MIN1]], %[[S]]
 //       CHECK:   return %[[MIN2]]
 func.func @cl_reduction_minui(%v : vector<3xi32>, %s: i32) -> i32 {
   %reduce = vector.reduction <minui>, %v, %s : vector<3xi32> into i32
@@ -123,8 +123,8 @@ func.func @cl_reduction_minui(%v : vector<3xi32>, %s: i32) -> i32 {
 
 // CHECK-LABEL: @broadcast
 //  CHECK-SAME: %[[A:.*]]: f32
-//       CHECK:   spv.CompositeConstruct %[[A]], %[[A]], %[[A]], %[[A]]
-//       CHECK:   spv.CompositeConstruct %[[A]], %[[A]]
+//       CHECK:   spirv.CompositeConstruct %[[A]], %[[A]], %[[A]], %[[A]]
+//       CHECK:   spirv.CompositeConstruct %[[A]], %[[A]]
 func.func @broadcast(%arg0 : f32) -> (vector<4xf32>, vector<2xf32>) {
   %0 = vector.broadcast %arg0 : f32 to vector<4xf32>
   %1 = vector.broadcast %arg0 : f32 to vector<2xf32>
@@ -135,8 +135,8 @@ func.func @broadcast(%arg0 : f32) -> (vector<4xf32>, vector<2xf32>) {
 
 // CHECK-LABEL: @extract
 //  CHECK-SAME: %[[ARG:.+]]: vector<2xf32>
-//       CHECK:   spv.CompositeExtract %[[ARG]][0 : i32] : vector<2xf32>
-//       CHECK:   spv.CompositeExtract %[[ARG]][1 : i32] : vector<2xf32>
+//       CHECK:   spirv.CompositeExtract %[[ARG]][0 : i32] : vector<2xf32>
+//       CHECK:   spirv.CompositeExtract %[[ARG]][1 : i32] : vector<2xf32>
 func.func @extract(%arg0 : vector<2xf32>) -> (vector<1xf32>, f32) {
   %0 = "vector.extract"(%arg0) {position = [0]} : (vector<2xf32>) -> vector<1xf32>
   %1 = "vector.extract"(%arg0) {position = [1]} : (vector<2xf32>) -> f32
@@ -158,7 +158,7 @@ func.func @extract_size1_vector(%arg0 : vector<1xf32>) -> f32 {
 
 // CHECK-LABEL: @insert
 //  CHECK-SAME: %[[V:.*]]: vector<4xf32>, %[[S:.*]]: f32
-//       CHECK:   spv.CompositeInsert %[[S]], %[[V]][2 : i32] : f32 into vector<4xf32>
+//       CHECK:   spirv.CompositeInsert %[[S]], %[[V]][2 : i32] : f32 into vector<4xf32>
 func.func @insert(%arg0 : vector<4xf32>, %arg1: f32) -> vector<4xf32> {
   %1 = vector.insert %arg1, %arg0[2] : f32 into vector<4xf32>
   return %1: vector<4xf32>
@@ -179,7 +179,7 @@ func.func @insert_size1_vector(%arg0 : vector<1xf32>, %arg1: f32) -> vector<1xf3
 
 // CHECK-LABEL: @extract_element
 //  CHECK-SAME: %[[V:.*]]: vector<4xf32>, %[[ID:.*]]: i32
-//       CHECK:   spv.VectorExtractDynamic %[[V]][%[[ID]]] : vector<4xf32>, i32
+//       CHECK:   spirv.VectorExtractDynamic %[[V]][%[[ID]]] : vector<4xf32>, i32
 func.func @extract_element(%arg0 : vector<4xf32>, %id : i32) -> f32 {
   %0 = vector.extractelement %arg0[%id : i32] : vector<4xf32>
   return %0: f32
@@ -229,8 +229,8 @@ func.func @extract_element_0d_vector(%arg0 : f32) -> f32 {
 
 // CHECK-LABEL: @extract_strided_slice
 //  CHECK-SAME: %[[ARG:.+]]: vector<4xf32>
-//       CHECK:   spv.VectorShuffle [1 : i32, 2 : i32] %[[ARG]] : vector<4xf32>, %[[ARG]] : vector<4xf32> -> vector<2xf32>
-//       CHECK:   spv.CompositeExtract %[[ARG]][1 : i32] : vector<4xf32>
+//       CHECK:   spirv.VectorShuffle [1 : i32, 2 : i32] %[[ARG]] : vector<4xf32>, %[[ARG]] : vector<4xf32> -> vector<2xf32>
+//       CHECK:   spirv.CompositeExtract %[[ARG]][1 : i32] : vector<4xf32>
 func.func @extract_strided_slice(%arg0: vector<4xf32>) -> (vector<2xf32>, vector<1xf32>) {
   %0 = vector.extract_strided_slice %arg0 {offsets = [1], sizes = [2], strides = [1]} : vector<4xf32> to vector<2xf32>
   %1 = vector.extract_strided_slice %arg0 {offsets = [1], sizes = [1], strides = [1]} : vector<4xf32> to vector<1xf32>
@@ -241,7 +241,7 @@ func.func @extract_strided_slice(%arg0: vector<4xf32>) -> (vector<2xf32>, vector
 
 // CHECK-LABEL: @insert_element
 //  CHECK-SAME: %[[VAL:.*]]: f32, %[[V:.*]]: vector<4xf32>, %[[ID:.*]]: i32
-//       CHECK:   spv.VectorInsertDynamic %[[VAL]], %[[V]][%[[ID]]] : vector<4xf32>, i32
+//       CHECK:   spirv.VectorInsertDynamic %[[VAL]], %[[V]][%[[ID]]] : vector<4xf32>, i32
 func.func @insert_element(%val: f32, %arg0 : vector<4xf32>, %id : i32) -> vector<4xf32> {
   %0 = vector.insertelement %val, %arg0[%id : i32] : vector<4xf32>
   return %0: vector<4xf32>
@@ -291,7 +291,7 @@ func.func @insert_element_0d_vector(%scalar: f32, %vector : vector<f32>) -> vect
 
 // CHECK-LABEL: @insert_strided_slice
 //  CHECK-SAME: %[[PART:.+]]: vector<2xf32>, %[[ALL:.+]]: vector<4xf32>
-//       CHECK:   spv.VectorShuffle [0 : i32, 4 : i32, 5 : i32, 3 : i32] %[[ALL]] : vector<4xf32>, %[[PART]] : vector<2xf32> -> vector<4xf32>
+//       CHECK:   spirv.VectorShuffle [0 : i32, 4 : i32, 5 : i32, 3 : i32] %[[ALL]] : vector<4xf32>, %[[PART]] : vector<2xf32> -> vector<4xf32>
 func.func @insert_strided_slice(%arg0: vector<2xf32>, %arg1: vector<4xf32>) -> vector<4xf32> {
   %0 = vector.insert_strided_slice %arg0, %arg1 {offsets = [1], strides = [1]} : vector<2xf32> into vector<4xf32>
   return %0 : vector<4xf32>
@@ -302,7 +302,7 @@ func.func @insert_strided_slice(%arg0: vector<2xf32>, %arg1: vector<4xf32>) -> v
 // CHECK-LABEL: @insert_size1_vector
 //  CHECK-SAME: %[[SUB:.*]]: vector<1xf32>, %[[FULL:.*]]: vector<3xf32>
 //       CHECK:   %[[S:.+]] = builtin.unrealized_conversion_cast %[[SUB]]
-//       CHECK:   spv.CompositeInsert %[[S]], %[[FULL]][2 : i32] : f32 into vector<3xf32>
+//       CHECK:   spirv.CompositeInsert %[[S]], %[[FULL]][2 : i32] : f32 into vector<3xf32>
 func.func @insert_size1_vector(%arg0 : vector<1xf32>, %arg1: vector<3xf32>) -> vector<3xf32> {
   %1 = vector.insert_strided_slice %arg0, %arg1 {offsets = [2], strides = [1]} : vector<1xf32> into vector<3xf32>
   return %1 : vector<3xf32>
@@ -312,7 +312,7 @@ func.func @insert_size1_vector(%arg0 : vector<1xf32>, %arg1: vector<3xf32>) -> v
 
 // CHECK-LABEL: @fma
 //  CHECK-SAME: %[[A:.*]]: vector<4xf32>, %[[B:.*]]: vector<4xf32>, %[[C:.*]]: vector<4xf32>
-//       CHECK:   spv.GL.Fma %[[A]], %[[B]], %[[C]] : vector<4xf32>
+//       CHECK:   spirv.GL.Fma %[[A]], %[[B]], %[[C]] : vector<4xf32>
 func.func @fma(%a: vector<4xf32>, %b: vector<4xf32>, %c: vector<4xf32>) -> vector<4xf32> {
   %0 = vector.fma %a, %b, %c: vector<4xf32>
   return %0 : vector<4xf32>
@@ -321,7 +321,7 @@ func.func @fma(%a: vector<4xf32>, %b: vector<4xf32>, %c: vector<4xf32>) -> vecto
 // -----
 
 // CHECK-LABEL: @fma_size1_vector
-//       CHECK:   spv.GL.Fma %{{.+}} : f32
+//       CHECK:   spirv.GL.Fma %{{.+}} : f32
 func.func @fma_size1_vector(%a: vector<1xf32>, %b: vector<1xf32>, %c: vector<1xf32>) -> vector<1xf32> {
   %0 = vector.fma %a, %b, %c: vector<1xf32>
   return %0 : vector<1xf32>
@@ -331,7 +331,7 @@ func.func @fma_size1_vector(%a: vector<1xf32>, %b: vector<1xf32>, %c: vector<1xf
 
 // CHECK-LABEL: func @splat
 //  CHECK-SAME: (%[[A:.+]]: f32)
-//       CHECK:   %[[VAL:.+]] = spv.CompositeConstruct %[[A]], %[[A]], %[[A]], %[[A]]
+//       CHECK:   %[[VAL:.+]] = spirv.CompositeConstruct %[[A]], %[[A]], %[[A]], %[[A]]
 //       CHECK:   return %[[VAL]]
 func.func @splat(%f : f32) -> vector<4xf32> {
   %splat = vector.splat %f : vector<4xf32>
@@ -355,7 +355,7 @@ func.func @splat_size1_vector(%f : f32) -> vector<1xf32> {
 //  CHECK-SAME:  %[[ARG0:.+]]: vector<1xf32>, %[[ARG1:.+]]: vector<1xf32>
 //       CHECK:    %[[V0:.+]] = builtin.unrealized_conversion_cast %[[ARG0]]
 //       CHECK:    %[[V1:.+]] = builtin.unrealized_conversion_cast %[[ARG1]]
-//       CHECK:    spv.CompositeConstruct %[[V0]], %[[V1]], %[[V1]], %[[V0]] : (f32, f32, f32, f32) -> vector<4xf32>
+//       CHECK:    spirv.CompositeConstruct %[[V0]], %[[V1]], %[[V1]], %[[V0]] : (f32, f32, f32, f32) -> vector<4xf32>
 func.func @shuffle(%v0 : vector<1xf32>, %v1: vector<1xf32>) -> vector<4xf32> {
   %shuffle = vector.shuffle %v0, %v1 [0, 1, 1, 0] : vector<1xf32>, vector<1xf32>
   return %shuffle : vector<4xf32>
@@ -365,7 +365,7 @@ func.func @shuffle(%v0 : vector<1xf32>, %v1: vector<1xf32>) -> vector<4xf32> {
 
 // CHECK-LABEL:  func @shuffle
 //  CHECK-SAME:  %[[V0:.+]]: vector<3xf32>, %[[V1:.+]]: vector<3xf32>
-//       CHECK:    spv.VectorShuffle [3 : i32, 2 : i32, 5 : i32, 1 : i32] %[[V0]] : vector<3xf32>, %[[V1]] : vector<3xf32> -> vector<4xf32>
+//       CHECK:    spirv.VectorShuffle [3 : i32, 2 : i32, 5 : i32, 1 : i32] %[[V0]] : vector<3xf32>, %[[V1]] : vector<3xf32> -> vector<4xf32>
 func.func @shuffle(%v0 : vector<3xf32>, %v1: vector<3xf32>) -> vector<4xf32> {
   %shuffle = vector.shuffle %v0, %v1 [3, 2, 5, 1] : vector<3xf32>, vector<3xf32>
   return %shuffle : vector<4xf32>
@@ -384,13 +384,13 @@ func.func @shuffle(%v0 : vector<2x16xf32>, %v1: vector<1x16xf32>) -> vector<3x16
 
 // CHECK-LABEL: func @reduction_add
 //  CHECK-SAME: (%[[V:.+]]: vector<4xi32>)
-//       CHECK:   %[[S0:.+]] = spv.CompositeExtract %[[V]][0 : i32] : vector<4xi32>
-//       CHECK:   %[[S1:.+]] = spv.CompositeExtract %[[V]][1 : i32] : vector<4xi32>
-//       CHECK:   %[[S2:.+]] = spv.CompositeExtract %[[V]][2 : i32] : vector<4xi32>
-//       CHECK:   %[[S3:.+]] = spv.CompositeExtract %[[V]][3 : i32] : vector<4xi32>
-//       CHECK:   %[[ADD0:.+]] = spv.IAdd %[[S0]], %[[S1]]
-//       CHECK:   %[[ADD1:.+]] = spv.IAdd %[[ADD0]], %[[S2]]
-//       CHECK:   %[[ADD2:.+]] = spv.IAdd %[[ADD1]], %[[S3]]
+//       CHECK:   %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<4xi32>
+//       CHECK:   %[[S1:.+]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<4xi32>
+//       CHECK:   %[[S2:.+]] = spirv.CompositeExtract %[[V]][2 : i32] : vector<4xi32>
+//       CHECK:   %[[S3:.+]] = spirv.CompositeExtract %[[V]][3 : i32] : vector<4xi32>
+//       CHECK:   %[[ADD0:.+]] = spirv.IAdd %[[S0]], %[[S1]]
+//       CHECK:   %[[ADD1:.+]] = spirv.IAdd %[[ADD0]], %[[S2]]
+//       CHECK:   %[[ADD2:.+]] = spirv.IAdd %[[ADD1]], %[[S3]]
 //       CHECK:   return %[[ADD2]]
 func.func @reduction_add(%v : vector<4xi32>) -> i32 {
   %reduce = vector.reduction <add>, %v : vector<4xi32> into i32
@@ -401,12 +401,12 @@ func.func @reduction_add(%v : vector<4xi32>) -> i32 {
 
 // CHECK-LABEL: func @reduction_mul
 //  CHECK-SAME: (%[[V:.+]]: vector<3xf32>, %[[S:.+]]: f32)
-//       CHECK:   %[[S0:.+]] = spv.CompositeExtract %[[V]][0 : i32] : vector<3xf32>
-//       CHECK:   %[[S1:.+]] = spv.CompositeExtract %[[V]][1 : i32] : vector<3xf32>
-//       CHECK:   %[[S2:.+]] = spv.CompositeExtract %[[V]][2 : i32] : vector<3xf32>
-//       CHECK:   %[[MUL0:.+]] = spv.FMul %[[S0]], %[[S1]]
-//       CHECK:   %[[MUL1:.+]] = spv.FMul %[[MUL0]], %[[S2]]
-//       CHECK:   %[[MUL2:.+]] = spv.FMul %[[MUL1]], %[[S]]
+//       CHECK:   %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xf32>
+//       CHECK:   %[[S1:.+]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xf32>
+//       CHECK:   %[[S2:.+]] = spirv.CompositeExtract %[[V]][2 : i32] : vector<3xf32>
+//       CHECK:   %[[MUL0:.+]] = spirv.FMul %[[S0]], %[[S1]]
+//       CHECK:   %[[MUL1:.+]] = spirv.FMul %[[MUL0]], %[[S2]]
+//       CHECK:   %[[MUL2:.+]] = spirv.FMul %[[MUL1]], %[[S]]
 //       CHECK:   return %[[MUL2]]
 func.func @reduction_mul(%v : vector<3xf32>, %s: f32) -> f32 {
   %reduce = vector.reduction <mul>, %v, %s : vector<3xf32> into f32
@@ -417,12 +417,12 @@ func.func @reduction_mul(%v : vector<3xf32>, %s: f32) -> f32 {
 
 // CHECK-LABEL: func @reduction_maxf
 //  CHECK-SAME: (%[[V:.+]]: vector<3xf32>, %[[S:.+]]: f32)
-//       CHECK:   %[[S0:.+]] = spv.CompositeExtract %[[V]][0 : i32] : vector<3xf32>
-//       CHECK:   %[[S1:.+]] = spv.CompositeExtract %[[V]][1 : i32] : vector<3xf32>
-//       CHECK:   %[[S2:.+]] = spv.CompositeExtract %[[V]][2 : i32] : vector<3xf32>
-//       CHECK:   %[[MAX0:.+]] = spv.GL.FMax %[[S0]], %[[S1]]
-//       CHECK:   %[[MAX1:.+]] = spv.GL.FMax %[[MAX0]], %[[S2]]
-//       CHECK:   %[[MAX2:.+]] = spv.GL.FMax %[[MAX1]], %[[S]]
+//       CHECK:   %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xf32>
+//       CHECK:   %[[S1:.+]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xf32>
+//       CHECK:   %[[S2:.+]] = spirv.CompositeExtract %[[V]][2 : i32] : vector<3xf32>
+//       CHECK:   %[[MAX0:.+]] = spirv.GL.FMax %[[S0]], %[[S1]]
+//       CHECK:   %[[MAX1:.+]] = spirv.GL.FMax %[[MAX0]], %[[S2]]
+//       CHECK:   %[[MAX2:.+]] = spirv.GL.FMax %[[MAX1]], %[[S]]
 //       CHECK:   return %[[MAX2]]
 func.func @reduction_maxf(%v : vector<3xf32>, %s: f32) -> f32 {
   %reduce = vector.reduction <maxf>, %v, %s : vector<3xf32> into f32
@@ -433,12 +433,12 @@ func.func @reduction_maxf(%v : vector<3xf32>, %s: f32) -> f32 {
 
 // CHECK-LABEL: func @reduction_minf
 //  CHECK-SAME: (%[[V:.+]]: vector<3xf32>, %[[S:.+]]: f32)
-//       CHECK:   %[[S0:.+]] = spv.CompositeExtract %[[V]][0 : i32] : vector<3xf32>
-//       CHECK:   %[[S1:.+]] = spv.CompositeExtract %[[V]][1 : i32] : vector<3xf32>
-//       CHECK:   %[[S2:.+]] = spv.CompositeExtract %[[V]][2 : i32] : vector<3xf32>
-//       CHECK:   %[[MIN0:.+]] = spv.GL.FMin %[[S0]], %[[S1]]
-//       CHECK:   %[[MIN1:.+]] = spv.GL.FMin %[[MIN0]], %[[S2]]
-//       CHECK:   %[[MIN2:.+]] = spv.GL.FMin %[[MIN1]], %[[S]]
+//       CHECK:   %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xf32>
+//       CHECK:   %[[S1:.+]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xf32>
+//       CHECK:   %[[S2:.+]] = spirv.CompositeExtract %[[V]][2 : i32] : vector<3xf32>
+//       CHECK:   %[[MIN0:.+]] = spirv.GL.FMin %[[S0]], %[[S1]]
+//       CHECK:   %[[MIN1:.+]] = spirv.GL.FMin %[[MIN0]], %[[S2]]
+//       CHECK:   %[[MIN2:.+]] = spirv.GL.FMin %[[MIN1]], %[[S]]
 //       CHECK:   return %[[MIN2]]
 func.func @reduction_minf(%v : vector<3xf32>, %s: f32) -> f32 {
   %reduce = vector.reduction <minf>, %v, %s : vector<3xf32> into f32
@@ -449,12 +449,12 @@ func.func @reduction_minf(%v : vector<3xf32>, %s: f32) -> f32 {
 
 // CHECK-LABEL: func @reduction_maxsi
 //  CHECK-SAME: (%[[V:.+]]: vector<3xi32>, %[[S:.+]]: i32)
-//       CHECK:   %[[S0:.+]] = spv.CompositeExtract %[[V]][0 : i32] : vector<3xi32>
-//       CHECK:   %[[S1:.+]] = spv.CompositeExtract %[[V]][1 : i32] : vector<3xi32>
-//       CHECK:   %[[S2:.+]] = spv.CompositeExtract %[[V]][2 : i32] : vector<3xi32>
-//       CHECK:   %[[MAX0:.+]] = spv.GL.SMax %[[S0]], %[[S1]]
-//       CHECK:   %[[MAX1:.+]] = spv.GL.SMax %[[MAX0]], %[[S2]]
-//       CHECK:   %[[MAX2:.+]] = spv.GL.SMax %[[MAX1]], %[[S]]
+//       CHECK:   %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xi32>
+//       CHECK:   %[[S1:.+]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xi32>
+//       CHECK:   %[[S2:.+]] = spirv.CompositeExtract %[[V]][2 : i32] : vector<3xi32>
+//       CHECK:   %[[MAX0:.+]] = spirv.GL.SMax %[[S0]], %[[S1]]
+//       CHECK:   %[[MAX1:.+]] = spirv.GL.SMax %[[MAX0]], %[[S2]]
+//       CHECK:   %[[MAX2:.+]] = spirv.GL.SMax %[[MAX1]], %[[S]]
 //       CHECK:   return %[[MAX2]]
 func.func @reduction_maxsi(%v : vector<3xi32>, %s: i32) -> i32 {
   %reduce = vector.reduction <maxsi>, %v, %s : vector<3xi32> into i32
@@ -465,12 +465,12 @@ func.func @reduction_maxsi(%v : vector<3xi32>, %s: i32) -> i32 {
 
 // CHECK-LABEL: func @reduction_minsi
 //  CHECK-SAME: (%[[V:.+]]: vector<3xi32>, %[[S:.+]]: i32)
-//       CHECK:   %[[S0:.+]] = spv.CompositeExtract %[[V]][0 : i32] : vector<3xi32>
-//       CHECK:   %[[S1:.+]] = spv.CompositeExtract %[[V]][1 : i32] : vector<3xi32>
-//       CHECK:   %[[S2:.+]] = spv.CompositeExtract %[[V]][2 : i32] : vector<3xi32>
-//       CHECK:   %[[MIN0:.+]] = spv.GL.SMin %[[S0]], %[[S1]]
-//       CHECK:   %[[MIN1:.+]] = spv.GL.SMin %[[MIN0]], %[[S2]]
-//       CHECK:   %[[MIN2:.+]] = spv.GL.SMin %[[MIN1]], %[[S]]
+//       CHECK:   %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xi32>
+//       CHECK:   %[[S1:.+]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xi32>
+//       CHECK:   %[[S2:.+]] = spirv.CompositeExtract %[[V]][2 : i32] : vector<3xi32>
+//       CHECK:   %[[MIN0:.+]] = spirv.GL.SMin %[[S0]], %[[S1]]
+//       CHECK:   %[[MIN1:.+]] = spirv.GL.SMin %[[MIN0]], %[[S2]]
+//       CHECK:   %[[MIN2:.+]] = spirv.GL.SMin %[[MIN1]], %[[S]]
 //       CHECK:   return %[[MIN2]]
 func.func @reduction_minsi(%v : vector<3xi32>, %s: i32) -> i32 {
   %reduce = vector.reduction <minsi>, %v, %s : vector<3xi32> into i32
@@ -481,12 +481,12 @@ func.func @reduction_minsi(%v : vector<3xi32>, %s: i32) -> i32 {
 
 // CHECK-LABEL: func @reduction_maxui
 //  CHECK-SAME: (%[[V:.+]]: vector<3xi32>, %[[S:.+]]: i32)
-//       CHECK:   %[[S0:.+]] = spv.CompositeExtract %[[V]][0 : i32] : vector<3xi32>
-//       CHECK:   %[[S1:.+]] = spv.CompositeExtract %[[V]][1 : i32] : vector<3xi32>
-//       CHECK:   %[[S2:.+]] = spv.CompositeExtract %[[V]][2 : i32] : vector<3xi32>
-//       CHECK:   %[[MAX0:.+]] = spv.GL.UMax %[[S0]], %[[S1]]
-//       CHECK:   %[[MAX1:.+]] = spv.GL.UMax %[[MAX0]], %[[S2]]
-//       CHECK:   %[[MAX2:.+]] = spv.GL.UMax %[[MAX1]], %[[S]]
+//       CHECK:   %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xi32>
+//       CHECK:   %[[S1:.+]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xi32>
+//       CHECK:   %[[S2:.+]] = spirv.CompositeExtract %[[V]][2 : i32] : vector<3xi32>
+//       CHECK:   %[[MAX0:.+]] = spirv.GL.UMax %[[S0]], %[[S1]]
+//       CHECK:   %[[MAX1:.+]] = spirv.GL.UMax %[[MAX0]], %[[S2]]
+//       CHECK:   %[[MAX2:.+]] = spirv.GL.UMax %[[MAX1]], %[[S]]
 //       CHECK:   return %[[MAX2]]
 func.func @reduction_maxui(%v : vector<3xi32>, %s: i32) -> i32 {
   %reduce = vector.reduction <maxui>, %v, %s : vector<3xi32> into i32
@@ -497,12 +497,12 @@ func.func @reduction_maxui(%v : vector<3xi32>, %s: i32) -> i32 {
 
 // CHECK-LABEL: func @reduction_minui
 //  CHECK-SAME: (%[[V:.+]]: vector<3xi32>, %[[S:.+]]: i32)
-//       CHECK:   %[[S0:.+]] = spv.CompositeExtract %[[V]][0 : i32] : vector<3xi32>
-//       CHECK:   %[[S1:.+]] = spv.CompositeExtract %[[V]][1 : i32] : vector<3xi32>
-//       CHECK:   %[[S2:.+]] = spv.CompositeExtract %[[V]][2 : i32] : vector<3xi32>
-//       CHECK:   %[[MIN0:.+]] = spv.GL.UMin %[[S0]], %[[S1]]
-//       CHECK:   %[[MIN1:.+]] = spv.GL.UMin %[[MIN0]], %[[S2]]
-//       CHECK:   %[[MIN2:.+]] = spv.GL.UMin %[[MIN1]], %[[S]]
+//       CHECK:   %[[S0:.+]] = spirv.CompositeExtract %[[V]][0 : i32] : vector<3xi32>
+//       CHECK:   %[[S1:.+]] = spirv.CompositeExtract %[[V]][1 : i32] : vector<3xi32>
+//       CHECK:   %[[S2:.+]] = spirv.CompositeExtract %[[V]][2 : i32] : vector<3xi32>
+//       CHECK:   %[[MIN0:.+]] = spirv.GL.UMin %[[S0]], %[[S1]]
+//       CHECK:   %[[MIN1:.+]] = spirv.GL.UMin %[[MIN0]], %[[S2]]
+//       CHECK:   %[[MIN2:.+]] = spirv.GL.UMin %[[MIN1]], %[[S]]
 //       CHECK:   return %[[MIN2]]
 func.func @reduction_minui(%v : vector<3xi32>, %s: i32) -> i32 {
   %reduce = vector.reduction <minui>, %v, %s : vector<3xi32> into i32

diff  --git a/mlir/test/Dialect/SPIRV/IR/arithmetic-ops.mlir b/mlir/test/Dialect/SPIRV/IR/arithmetic-ops.mlir
index fb2622d95c30..c59179eda9fd 100644
--- a/mlir/test/Dialect/SPIRV/IR/arithmetic-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/arithmetic-ops.mlir
@@ -1,54 +1,54 @@
 // RUN: mlir-opt -split-input-file -verify-diagnostics %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.FAdd
+// spirv.FAdd
 //===----------------------------------------------------------------------===//
 
 func.func @fadd_scalar(%arg: f32) -> f32 {
-  // CHECK: spv.FAdd
-  %0 = spv.FAdd %arg, %arg : f32
+  // CHECK: spirv.FAdd
+  %0 = spirv.FAdd %arg, %arg : f32
   return %0 : f32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.FDiv
+// spirv.FDiv
 //===----------------------------------------------------------------------===//
 
 func.func @fdiv_scalar(%arg: f32) -> f32 {
-  // CHECK: spv.FDiv
-  %0 = spv.FDiv %arg, %arg : f32
+  // CHECK: spirv.FDiv
+  %0 = spirv.FDiv %arg, %arg : f32
   return %0 : f32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.FMod
+// spirv.FMod
 //===----------------------------------------------------------------------===//
 
 func.func @fmod_scalar(%arg: f32) -> f32 {
-  // CHECK: spv.FMod
-  %0 = spv.FMod %arg, %arg : f32
+  // CHECK: spirv.FMod
+  %0 = spirv.FMod %arg, %arg : f32
   return %0 : f32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.FMul
+// spirv.FMul
 //===----------------------------------------------------------------------===//
 
 func.func @fmul_scalar(%arg: f32) -> f32 {
-  // CHECK: spv.FMul
-  %0 = spv.FMul %arg, %arg : f32
+  // CHECK: spirv.FMul
+  %0 = spirv.FMul %arg, %arg : f32
   return %0 : f32
 }
 
 func.func @fmul_vector(%arg: vector<4xf32>) -> vector<4xf32> {
-  // CHECK: spv.FMul
-  %0 = spv.FMul %arg, %arg : vector<4xf32>
+  // CHECK: spirv.FMul
+  %0 = spirv.FMul %arg, %arg : vector<4xf32>
   return %0 : vector<4xf32>
 }
 
@@ -56,7 +56,7 @@ func.func @fmul_vector(%arg: vector<4xf32>) -> vector<4xf32> {
 
 func.func @fmul_i32(%arg: i32) -> i32 {
   // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
-  %0 = spv.FMul %arg, %arg : i32
+  %0 = spirv.FMul %arg, %arg : i32
   return %0 : i32
 }
 
@@ -64,7 +64,7 @@ func.func @fmul_i32(%arg: i32) -> i32 {
 
 func.func @fmul_bf16(%arg: bf16) -> bf16 {
   // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
-  %0 = spv.FMul %arg, %arg : bf16
+  %0 = spirv.FMul %arg, %arg : bf16
   return %0 : bf16
 }
 
@@ -72,265 +72,265 @@ func.func @fmul_bf16(%arg: bf16) -> bf16 {
 
 func.func @fmul_tensor(%arg: tensor<4xf32>) -> tensor<4xf32> {
   // expected-error @+1 {{operand #0 must be 16/32/64-bit float or vector of 16/32/64-bit float values}}
-  %0 = spv.FMul %arg, %arg : tensor<4xf32>
+  %0 = spirv.FMul %arg, %arg : tensor<4xf32>
   return %0 : tensor<4xf32>
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.FNegate
+// spirv.FNegate
 //===----------------------------------------------------------------------===//
 
 func.func @fnegate_scalar(%arg: f32) -> f32 {
-  // CHECK: spv.FNegate
-  %0 = spv.FNegate %arg : f32
+  // CHECK: spirv.FNegate
+  %0 = spirv.FNegate %arg : f32
   return %0 : f32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.FRem
+// spirv.FRem
 //===----------------------------------------------------------------------===//
 
 func.func @frem_scalar(%arg: f32) -> f32 {
-  // CHECK: spv.FRem
-  %0 = spv.FRem %arg, %arg : f32
+  // CHECK: spirv.FRem
+  %0 = spirv.FRem %arg, %arg : f32
   return %0 : f32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.FSub
+// spirv.FSub
 //===----------------------------------------------------------------------===//
 
 func.func @fsub_scalar(%arg: f32) -> f32 {
-  // CHECK: spv.FSub
-  %0 = spv.FSub %arg, %arg : f32
+  // CHECK: spirv.FSub
+  %0 = spirv.FSub %arg, %arg : f32
   return %0 : f32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.IAdd
+// spirv.IAdd
 //===----------------------------------------------------------------------===//
 
 func.func @iadd_scalar(%arg: i32) -> i32 {
-  // CHECK: spv.IAdd
-  %0 = spv.IAdd %arg, %arg : i32
+  // CHECK: spirv.IAdd
+  %0 = spirv.IAdd %arg, %arg : i32
   return %0 : i32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.IMul
+// spirv.IMul
 //===----------------------------------------------------------------------===//
 
 func.func @imul_scalar(%arg: i32) -> i32 {
-  // CHECK: spv.IMul
-  %0 = spv.IMul %arg, %arg : i32
+  // CHECK: spirv.IMul
+  %0 = spirv.IMul %arg, %arg : i32
   return %0 : i32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.ISub
+// spirv.ISub
 //===----------------------------------------------------------------------===//
 
 func.func @isub_scalar(%arg: i32) -> i32 {
-  // CHECK: spv.ISub
-  %0 = spv.ISub %arg, %arg : i32
+  // CHECK: spirv.ISub
+  %0 = spirv.ISub %arg, %arg : i32
   return %0 : i32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.IAddCarry
+// spirv.IAddCarry
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @iadd_carry_scalar
-func.func @iadd_carry_scalar(%arg: i32) -> !spv.struct<(i32, i32)> {
-  // CHECK: spv.IAddCarry %{{.+}}, %{{.+}} : !spv.struct<(i32, i32)>
-  %0 = spv.IAddCarry %arg, %arg : !spv.struct<(i32, i32)>
-  return %0 : !spv.struct<(i32, i32)>
+func.func @iadd_carry_scalar(%arg: i32) -> !spirv.struct<(i32, i32)> {
+  // CHECK: spirv.IAddCarry %{{.+}}, %{{.+}} : !spirv.struct<(i32, i32)>
+  %0 = spirv.IAddCarry %arg, %arg : !spirv.struct<(i32, i32)>
+  return %0 : !spirv.struct<(i32, i32)>
 }
 
 // CHECK-LABEL: @iadd_carry_vector
-func.func @iadd_carry_vector(%arg: vector<3xi32>) -> !spv.struct<(vector<3xi32>, vector<3xi32>)> {
-  // CHECK: spv.IAddCarry %{{.+}}, %{{.+}} : !spv.struct<(vector<3xi32>, vector<3xi32>)>
-  %0 = spv.IAddCarry %arg, %arg : !spv.struct<(vector<3xi32>, vector<3xi32>)>
-  return %0 : !spv.struct<(vector<3xi32>, vector<3xi32>)>
+func.func @iadd_carry_vector(%arg: vector<3xi32>) -> !spirv.struct<(vector<3xi32>, vector<3xi32>)> {
+  // CHECK: spirv.IAddCarry %{{.+}}, %{{.+}} : !spirv.struct<(vector<3xi32>, vector<3xi32>)>
+  %0 = spirv.IAddCarry %arg, %arg : !spirv.struct<(vector<3xi32>, vector<3xi32>)>
+  return %0 : !spirv.struct<(vector<3xi32>, vector<3xi32>)>
 }
 
 // -----
 
-func.func @iadd_carry(%arg: i32) -> !spv.struct<(i32, i32, i32)> {
-  // expected-error @+1 {{expected spv.struct type with two members}}
-  %0 = spv.IAddCarry %arg, %arg : !spv.struct<(i32, i32, i32)>
-  return %0 : !spv.struct<(i32, i32, i32)>
+func.func @iadd_carry(%arg: i32) -> !spirv.struct<(i32, i32, i32)> {
+  // expected-error @+1 {{expected spirv.struct type with two members}}
+  %0 = spirv.IAddCarry %arg, %arg : !spirv.struct<(i32, i32, i32)>
+  return %0 : !spirv.struct<(i32, i32, i32)>
 }
 
 // -----
 
-func.func @iadd_carry(%arg: i32) -> !spv.struct<(i32)> {
+func.func @iadd_carry(%arg: i32) -> !spirv.struct<(i32)> {
   // expected-error @+1 {{expected result struct type containing two members}}
-  %0 = "spv.IAddCarry"(%arg, %arg): (i32, i32) -> !spv.struct<(i32)>
-  return %0 : !spv.struct<(i32)>
+  %0 = "spirv.IAddCarry"(%arg, %arg): (i32, i32) -> !spirv.struct<(i32)>
+  return %0 : !spirv.struct<(i32)>
 }
 
 // -----
 
-func.func @iadd_carry(%arg: i32) -> !spv.struct<(i32, i64)> {
+func.func @iadd_carry(%arg: i32) -> !spirv.struct<(i32, i64)> {
   // expected-error @+1 {{expected all operand types and struct member types are the same}}
-  %0 = "spv.IAddCarry"(%arg, %arg): (i32, i32) -> !spv.struct<(i32, i64)>
-  return %0 : !spv.struct<(i32, i64)>
+  %0 = "spirv.IAddCarry"(%arg, %arg): (i32, i32) -> !spirv.struct<(i32, i64)>
+  return %0 : !spirv.struct<(i32, i64)>
 }
 
 // -----
 
-func.func @iadd_carry(%arg: i64) -> !spv.struct<(i32, i32)> {
+func.func @iadd_carry(%arg: i64) -> !spirv.struct<(i32, i32)> {
   // expected-error @+1 {{expected all operand types and struct member types are the same}}
-  %0 = "spv.IAddCarry"(%arg, %arg): (i64, i64) -> !spv.struct<(i32, i32)>
-  return %0 : !spv.struct<(i32, i32)>
+  %0 = "spirv.IAddCarry"(%arg, %arg): (i64, i64) -> !spirv.struct<(i32, i32)>
+  return %0 : !spirv.struct<(i32, i32)>
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.ISubBorrow
+// spirv.ISubBorrow
 //===----------------------------------------------------------------------===//
 
 // CHECK-LABEL: @isub_borrow_scalar
-func.func @isub_borrow_scalar(%arg: i32) -> !spv.struct<(i32, i32)> {
-  // CHECK: spv.ISubBorrow %{{.+}}, %{{.+}} : !spv.struct<(i32, i32)>
-  %0 = spv.ISubBorrow %arg, %arg : !spv.struct<(i32, i32)>
-  return %0 : !spv.struct<(i32, i32)>
+func.func @isub_borrow_scalar(%arg: i32) -> !spirv.struct<(i32, i32)> {
+  // CHECK: spirv.ISubBorrow %{{.+}}, %{{.+}} : !spirv.struct<(i32, i32)>
+  %0 = spirv.ISubBorrow %arg, %arg : !spirv.struct<(i32, i32)>
+  return %0 : !spirv.struct<(i32, i32)>
 }
 
 // CHECK-LABEL: @isub_borrow_vector
-func.func @isub_borrow_vector(%arg: vector<3xi32>) -> !spv.struct<(vector<3xi32>, vector<3xi32>)> {
-  // CHECK: spv.ISubBorrow %{{.+}}, %{{.+}} : !spv.struct<(vector<3xi32>, vector<3xi32>)>
-  %0 = spv.ISubBorrow %arg, %arg : !spv.struct<(vector<3xi32>, vector<3xi32>)>
-  return %0 : !spv.struct<(vector<3xi32>, vector<3xi32>)>
+func.func @isub_borrow_vector(%arg: vector<3xi32>) -> !spirv.struct<(vector<3xi32>, vector<3xi32>)> {
+  // CHECK: spirv.ISubBorrow %{{.+}}, %{{.+}} : !spirv.struct<(vector<3xi32>, vector<3xi32>)>
+  %0 = spirv.ISubBorrow %arg, %arg : !spirv.struct<(vector<3xi32>, vector<3xi32>)>
+  return %0 : !spirv.struct<(vector<3xi32>, vector<3xi32>)>
 }
 
 // -----
 
-func.func @isub_borrow(%arg: i32) -> !spv.struct<(i32, i32, i32)> {
-  // expected-error @+1 {{expected spv.struct type with two members}}
-  %0 = spv.ISubBorrow %arg, %arg : !spv.struct<(i32, i32, i32)>
-  return %0 : !spv.struct<(i32, i32, i32)>
+func.func @isub_borrow(%arg: i32) -> !spirv.struct<(i32, i32, i32)> {
+  // expected-error @+1 {{expected spirv.struct type with two members}}
+  %0 = spirv.ISubBorrow %arg, %arg : !spirv.struct<(i32, i32, i32)>
+  return %0 : !spirv.struct<(i32, i32, i32)>
 }
 
 // -----
 
-func.func @isub_borrow(%arg: i32) -> !spv.struct<(i32)> {
+func.func @isub_borrow(%arg: i32) -> !spirv.struct<(i32)> {
   // expected-error @+1 {{expected result struct type containing two members}}
-  %0 = "spv.ISubBorrow"(%arg, %arg): (i32, i32) -> !spv.struct<(i32)>
-  return %0 : !spv.struct<(i32)>
+  %0 = "spirv.ISubBorrow"(%arg, %arg): (i32, i32) -> !spirv.struct<(i32)>
+  return %0 : !spirv.struct<(i32)>
 }
 
 // -----
 
-func.func @isub_borrow(%arg: i32) -> !spv.struct<(i32, i64)> {
+func.func @isub_borrow(%arg: i32) -> !spirv.struct<(i32, i64)> {
   // expected-error @+1 {{expected all operand types and struct member types are the same}}
-  %0 = "spv.ISubBorrow"(%arg, %arg): (i32, i32) -> !spv.struct<(i32, i64)>
-  return %0 : !spv.struct<(i32, i64)>
+  %0 = "spirv.ISubBorrow"(%arg, %arg): (i32, i32) -> !spirv.struct<(i32, i64)>
+  return %0 : !spirv.struct<(i32, i64)>
 }
 
 // -----
 
-func.func @isub_borrow(%arg: i64) -> !spv.struct<(i32, i32)> {
+func.func @isub_borrow(%arg: i64) -> !spirv.struct<(i32, i32)> {
   // expected-error @+1 {{expected all operand types and struct member types are the same}}
-  %0 = "spv.ISubBorrow"(%arg, %arg): (i64, i64) -> !spv.struct<(i32, i32)>
-  return %0 : !spv.struct<(i32, i32)>
+  %0 = "spirv.ISubBorrow"(%arg, %arg): (i64, i64) -> !spirv.struct<(i32, i32)>
+  return %0 : !spirv.struct<(i32, i32)>
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.SDiv
+// spirv.SDiv
 //===----------------------------------------------------------------------===//
 
 func.func @sdiv_scalar(%arg: i32) -> i32 {
-  // CHECK: spv.SDiv
-  %0 = spv.SDiv %arg, %arg : i32
+  // CHECK: spirv.SDiv
+  %0 = spirv.SDiv %arg, %arg : i32
   return %0 : i32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.SMod
+// spirv.SMod
 //===----------------------------------------------------------------------===//
 
 func.func @smod_scalar(%arg: i32) -> i32 {
-  // CHECK: spv.SMod
-  %0 = spv.SMod %arg, %arg : i32
+  // CHECK: spirv.SMod
+  %0 = spirv.SMod %arg, %arg : i32
   return %0 : i32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.SNegate
+// spirv.SNegate
 //===----------------------------------------------------------------------===//
 
 func.func @snegate_scalar(%arg: i32) -> i32 {
-  // CHECK: spv.SNegate
-  %0 = spv.SNegate %arg : i32
+  // CHECK: spirv.SNegate
+  %0 = spirv.SNegate %arg : i32
   return %0 : i32
 }
 
 // -----
 //===----------------------------------------------------------------------===//
-// spv.SRem
+// spirv.SRem
 //===----------------------------------------------------------------------===//
 
 func.func @srem_scalar(%arg: i32) -> i32 {
-  // CHECK: spv.SRem
-  %0 = spv.SRem %arg, %arg : i32
+  // CHECK: spirv.SRem
+  %0 = spirv.SRem %arg, %arg : i32
   return %0 : i32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.UDiv
+// spirv.UDiv
 //===----------------------------------------------------------------------===//
 
 func.func @udiv_scalar(%arg: i32) -> i32 {
-  // CHECK: spv.UDiv
-  %0 = spv.UDiv %arg, %arg : i32
+  // CHECK: spirv.UDiv
+  %0 = spirv.UDiv %arg, %arg : i32
   return %0 : i32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.UMod
+// spirv.UMod
 //===----------------------------------------------------------------------===//
 
 func.func @umod_scalar(%arg: i32) -> i32 {
-  // CHECK: spv.UMod
-  %0 = spv.UMod %arg, %arg : i32
+  // CHECK: spirv.UMod
+  %0 = spirv.UMod %arg, %arg : i32
   return %0 : i32
 }
 
 // -----
 //===----------------------------------------------------------------------===//
-// spv.VectorTimesScalar
+// spirv.VectorTimesScalar
 //===----------------------------------------------------------------------===//
 
 func.func @vector_times_scalar(%vector: vector<4xf32>, %scalar: f32) -> vector<4xf32> {
-  // CHECK: spv.VectorTimesScalar %{{.+}}, %{{.+}} : (vector<4xf32>, f32) -> vector<4xf32>
-  %0 = spv.VectorTimesScalar %vector, %scalar : (vector<4xf32>, f32) -> vector<4xf32>
+  // CHECK: spirv.VectorTimesScalar %{{.+}}, %{{.+}} : (vector<4xf32>, f32) -> vector<4xf32>
+  %0 = spirv.VectorTimesScalar %vector, %scalar : (vector<4xf32>, f32) -> vector<4xf32>
   return %0 : vector<4xf32>
 }
 
@@ -338,7 +338,7 @@ func.func @vector_times_scalar(%vector: vector<4xf32>, %scalar: f32) -> vector<4
 
 func.func @vector_times_scalar(%vector: vector<4xf32>, %scalar: f16) -> vector<4xf32> {
   // expected-error @+1 {{scalar operand and result element type match}}
-  %0 = spv.VectorTimesScalar %vector, %scalar : (vector<4xf32>, f16) -> vector<4xf32>
+  %0 = spirv.VectorTimesScalar %vector, %scalar : (vector<4xf32>, f16) -> vector<4xf32>
   return %0 : vector<4xf32>
 }
 
@@ -346,6 +346,6 @@ func.func @vector_times_scalar(%vector: vector<4xf32>, %scalar: f16) -> vector<4
 
 func.func @vector_times_scalar(%vector: vector<4xf32>, %scalar: f32) -> vector<3xf32> {
   // expected-error @+1 {{vector operand and result type mismatch}}
-  %0 = spv.VectorTimesScalar %vector, %scalar : (vector<4xf32>, f32) -> vector<3xf32>
+  %0 = spirv.VectorTimesScalar %vector, %scalar : (vector<4xf32>, f32) -> vector<3xf32>
   return %0 : vector<3xf32>
 }

diff  --git a/mlir/test/Dialect/SPIRV/IR/asm-op-interface.mlir b/mlir/test/Dialect/SPIRV/IR/asm-op-interface.mlir
index e45fa81f0c46..c04112d9d11b 100644
--- a/mlir/test/Dialect/SPIRV/IR/asm-op-interface.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/asm-op-interface.mlir
@@ -2,40 +2,40 @@
 
 func.func @const() -> () {
   // CHECK: %true
-  %0 = spv.Constant true
+  %0 = spirv.Constant true
   // CHECK: %false
-  %1 = spv.Constant false
+  %1 = spirv.Constant false
 
   // CHECK: %cst42_i32
-  %2 = spv.Constant 42 : i32
+  %2 = spirv.Constant 42 : i32
   // CHECK: %cst-42_i32
-  %-2 = spv.Constant -42 : i32
+  %-2 = spirv.Constant -42 : i32
   // CHECK: %cst43_i64
-  %3 = spv.Constant 43 : i64
+  %3 = spirv.Constant 43 : i64
 
   // CHECK: %cst_f32
-  %4 = spv.Constant 0.5 : f32
+  %4 = spirv.Constant 0.5 : f32
   // CHECK: %cst_f64
-  %5 = spv.Constant 0.5 : f64
+  %5 = spirv.Constant 0.5 : f64
 
   // CHECK: %cst_vec_3xi32 
-  %6 = spv.Constant dense<[1, 2, 3]> : vector<3xi32>
+  %6 = spirv.Constant dense<[1, 2, 3]> : vector<3xi32>
 
   // CHECK: %cst
-  %8 = spv.Constant [dense<3.0> : vector<2xf32>] : !spv.array<1xvector<2xf32>>
+  %8 = spirv.Constant [dense<3.0> : vector<2xf32>] : !spirv.array<1xvector<2xf32>>
 
   return
 }
 
 // -----
 
-spv.module Logical GLSL450 {
-  spv.GlobalVariable @global_var : !spv.ptr<f32, Input>
+spirv.module Logical GLSL450 {
+  spirv.GlobalVariable @global_var : !spirv.ptr<f32, Input>
 
-  spv.func @addressof() -> () "None" {
-    // CHECK: %global_var_addr = spv.mlir.addressof 
-    %0 = spv.mlir.addressof @global_var : !spv.ptr<f32, Input>
-    spv.Return
+  spirv.func @addressof() -> () "None" {
+    // CHECK: %global_var_addr = spirv.mlir.addressof 
+    %0 = spirv.mlir.addressof @global_var : !spirv.ptr<f32, Input>
+    spirv.Return
   }
 }
 

diff  --git a/mlir/test/Dialect/SPIRV/IR/atomic-ops.mlir b/mlir/test/Dialect/SPIRV/IR/atomic-ops.mlir
index 30732d84fc0a..ed0e19534c17 100644
--- a/mlir/test/Dialect/SPIRV/IR/atomic-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/atomic-ops.mlir
@@ -1,274 +1,274 @@
 // RUN: mlir-opt -split-input-file -verify-diagnostics %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicAnd
+// spirv.AtomicAnd
 //===----------------------------------------------------------------------===//
 
-func.func @atomic_and(%ptr : !spv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
-  // CHECK: spv.AtomicAnd "Device" "None" %{{.*}}, %{{.*}} : !spv.ptr<i32, StorageBuffer>
-  %0 = spv.AtomicAnd "Device" "None" %ptr, %value : !spv.ptr<i32, StorageBuffer>
+func.func @atomic_and(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
+  // CHECK: spirv.AtomicAnd "Device" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+  %0 = spirv.AtomicAnd "Device" "None" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
   return %0 : i32
 }
 
 // -----
 
-func.func @atomic_and(%ptr : !spv.ptr<f32, StorageBuffer>, %value : i32) -> i32 {
+func.func @atomic_and(%ptr : !spirv.ptr<f32, StorageBuffer>, %value : i32) -> i32 {
   // expected-error @+1 {{pointer operand must point to an integer value, found 'f32'}}
-  %0 = "spv.AtomicAnd"(%ptr, %value) {memory_scope = #spv.scope<Workgroup>, semantics = #spv.memory_semantics<AcquireRelease>} : (!spv.ptr<f32, StorageBuffer>, i32) -> (i32)
+  %0 = "spirv.AtomicAnd"(%ptr, %value) {memory_scope = #spirv.scope<Workgroup>, semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<f32, StorageBuffer>, i32) -> (i32)
   return %0 : i32
 }
 
 
 // -----
 
-func.func @atomic_and(%ptr : !spv.ptr<i32, StorageBuffer>, %value : i64) -> i64 {
+func.func @atomic_and(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i64) -> i64 {
   // expected-error @+1 {{expected value to have the same type as the pointer operand's pointee type 'i32', but found 'i64'}}
-  %0 = "spv.AtomicAnd"(%ptr, %value) {memory_scope = #spv.scope<Workgroup>, semantics = #spv.memory_semantics<AcquireRelease>} : (!spv.ptr<i32, StorageBuffer>, i64) -> (i64)
+  %0 = "spirv.AtomicAnd"(%ptr, %value) {memory_scope = #spirv.scope<Workgroup>, semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i32, StorageBuffer>, i64) -> (i64)
   return %0 : i64
 }
 
 // -----
 
-func.func @atomic_and(%ptr : !spv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
+func.func @atomic_and(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
   // expected-error @+1 {{expected at most one of these four memory constraints to be set: `Acquire`, `Release`,`AcquireRelease` or `SequentiallyConsistent`}}
-  %0 = spv.AtomicAnd "Device" "Acquire|Release" %ptr, %value : !spv.ptr<i32, StorageBuffer>
+  %0 = spirv.AtomicAnd "Device" "Acquire|Release" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
   return %0 : i32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicCompareExchange
+// spirv.AtomicCompareExchange
 //===----------------------------------------------------------------------===//
 
-func.func @atomic_compare_exchange(%ptr: !spv.ptr<i32, Workgroup>, %value: i32, %comparator: i32) -> i32 {
-  // CHECK: spv.AtomicCompareExchange "Workgroup" "Release" "Acquire" %{{.*}}, %{{.*}}, %{{.*}} : !spv.ptr<i32, Workgroup>
-  %0 = spv.AtomicCompareExchange "Workgroup" "Release" "Acquire" %ptr, %value, %comparator: !spv.ptr<i32, Workgroup>
+func.func @atomic_compare_exchange(%ptr: !spirv.ptr<i32, Workgroup>, %value: i32, %comparator: i32) -> i32 {
+  // CHECK: spirv.AtomicCompareExchange "Workgroup" "Release" "Acquire" %{{.*}}, %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
+  %0 = spirv.AtomicCompareExchange "Workgroup" "Release" "Acquire" %ptr, %value, %comparator: !spirv.ptr<i32, Workgroup>
   return %0: i32
 }
 
 // -----
 
-func.func @atomic_compare_exchange(%ptr: !spv.ptr<i32, Workgroup>, %value: i64, %comparator: i32) -> i32 {
+func.func @atomic_compare_exchange(%ptr: !spirv.ptr<i32, Workgroup>, %value: i64, %comparator: i32) -> i32 {
   // expected-error @+1 {{value operand must have the same type as the op result, but found 'i64' vs 'i32'}}
-  %0 = "spv.AtomicCompareExchange"(%ptr, %value, %comparator) {memory_scope = #spv.scope<Workgroup>, equal_semantics = #spv.memory_semantics<AcquireRelease>, unequal_semantics = #spv.memory_semantics<AcquireRelease>} : (!spv.ptr<i32, Workgroup>, i64, i32) -> (i32)
+  %0 = "spirv.AtomicCompareExchange"(%ptr, %value, %comparator) {memory_scope = #spirv.scope<Workgroup>, equal_semantics = #spirv.memory_semantics<AcquireRelease>, unequal_semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i32, Workgroup>, i64, i32) -> (i32)
   return %0: i32
 }
 
 // -----
 
-func.func @atomic_compare_exchange(%ptr: !spv.ptr<i32, Workgroup>, %value: i32, %comparator: i16) -> i32 {
+func.func @atomic_compare_exchange(%ptr: !spirv.ptr<i32, Workgroup>, %value: i32, %comparator: i16) -> i32 {
   // expected-error @+1 {{comparator operand must have the same type as the op result, but found 'i16' vs 'i32'}}
-  %0 = "spv.AtomicCompareExchange"(%ptr, %value, %comparator) {memory_scope = #spv.scope<Workgroup>, equal_semantics = #spv.memory_semantics<AcquireRelease>, unequal_semantics = #spv.memory_semantics<AcquireRelease>} : (!spv.ptr<i32, Workgroup>, i32, i16) -> (i32)
+  %0 = "spirv.AtomicCompareExchange"(%ptr, %value, %comparator) {memory_scope = #spirv.scope<Workgroup>, equal_semantics = #spirv.memory_semantics<AcquireRelease>, unequal_semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i32, Workgroup>, i32, i16) -> (i32)
   return %0: i32
 }
 
 // -----
 
-func.func @atomic_compare_exchange(%ptr: !spv.ptr<i64, Workgroup>, %value: i32, %comparator: i32) -> i32 {
+func.func @atomic_compare_exchange(%ptr: !spirv.ptr<i64, Workgroup>, %value: i32, %comparator: i32) -> i32 {
   // expected-error @+1 {{pointer operand's pointee type must have the same as the op result type, but found 'i64' vs 'i32'}}
-  %0 = "spv.AtomicCompareExchange"(%ptr, %value, %comparator) {memory_scope = #spv.scope<Workgroup>, equal_semantics = #spv.memory_semantics<AcquireRelease>, unequal_semantics = #spv.memory_semantics<AcquireRelease>} : (!spv.ptr<i64, Workgroup>, i32, i32) -> (i32)
+  %0 = "spirv.AtomicCompareExchange"(%ptr, %value, %comparator) {memory_scope = #spirv.scope<Workgroup>, equal_semantics = #spirv.memory_semantics<AcquireRelease>, unequal_semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i64, Workgroup>, i32, i32) -> (i32)
   return %0: i32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicCompareExchangeWeak
+// spirv.AtomicCompareExchangeWeak
 //===----------------------------------------------------------------------===//
 
-func.func @atomic_compare_exchange_weak(%ptr: !spv.ptr<i32, Workgroup>, %value: i32, %comparator: i32) -> i32 {
-  // CHECK: spv.AtomicCompareExchangeWeak "Workgroup" "Release" "Acquire" %{{.*}}, %{{.*}}, %{{.*}} : !spv.ptr<i32, Workgroup>
-  %0 = spv.AtomicCompareExchangeWeak "Workgroup" "Release" "Acquire" %ptr, %value, %comparator: !spv.ptr<i32, Workgroup>
+func.func @atomic_compare_exchange_weak(%ptr: !spirv.ptr<i32, Workgroup>, %value: i32, %comparator: i32) -> i32 {
+  // CHECK: spirv.AtomicCompareExchangeWeak "Workgroup" "Release" "Acquire" %{{.*}}, %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
+  %0 = spirv.AtomicCompareExchangeWeak "Workgroup" "Release" "Acquire" %ptr, %value, %comparator: !spirv.ptr<i32, Workgroup>
   return %0: i32
 }
 
 // -----
 
-func.func @atomic_compare_exchange_weak(%ptr: !spv.ptr<i32, Workgroup>, %value: i64, %comparator: i32) -> i32 {
+func.func @atomic_compare_exchange_weak(%ptr: !spirv.ptr<i32, Workgroup>, %value: i64, %comparator: i32) -> i32 {
   // expected-error @+1 {{value operand must have the same type as the op result, but found 'i64' vs 'i32'}}
-  %0 = "spv.AtomicCompareExchangeWeak"(%ptr, %value, %comparator) {memory_scope = #spv.scope<Workgroup>, equal_semantics = #spv.memory_semantics<AcquireRelease>, unequal_semantics = #spv.memory_semantics<AcquireRelease>} : (!spv.ptr<i32, Workgroup>, i64, i32) -> (i32)
+  %0 = "spirv.AtomicCompareExchangeWeak"(%ptr, %value, %comparator) {memory_scope = #spirv.scope<Workgroup>, equal_semantics = #spirv.memory_semantics<AcquireRelease>, unequal_semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i32, Workgroup>, i64, i32) -> (i32)
   return %0: i32
 }
 
 // -----
 
-func.func @atomic_compare_exchange_weak(%ptr: !spv.ptr<i32, Workgroup>, %value: i32, %comparator: i16) -> i32 {
+func.func @atomic_compare_exchange_weak(%ptr: !spirv.ptr<i32, Workgroup>, %value: i32, %comparator: i16) -> i32 {
   // expected-error @+1 {{comparator operand must have the same type as the op result, but found 'i16' vs 'i32'}}
-  %0 = "spv.AtomicCompareExchangeWeak"(%ptr, %value, %comparator) {memory_scope = #spv.scope<Workgroup>, equal_semantics = #spv.memory_semantics<AcquireRelease>, unequal_semantics = #spv.memory_semantics<AcquireRelease>} : (!spv.ptr<i32, Workgroup>, i32, i16) -> (i32)
+  %0 = "spirv.AtomicCompareExchangeWeak"(%ptr, %value, %comparator) {memory_scope = #spirv.scope<Workgroup>, equal_semantics = #spirv.memory_semantics<AcquireRelease>, unequal_semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i32, Workgroup>, i32, i16) -> (i32)
   return %0: i32
 }
 
 // -----
 
-func.func @atomic_compare_exchange_weak(%ptr: !spv.ptr<i64, Workgroup>, %value: i32, %comparator: i32) -> i32 {
+func.func @atomic_compare_exchange_weak(%ptr: !spirv.ptr<i64, Workgroup>, %value: i32, %comparator: i32) -> i32 {
   // expected-error @+1 {{pointer operand's pointee type must have the same as the op result type, but found 'i64' vs 'i32'}}
-  %0 = "spv.AtomicCompareExchangeWeak"(%ptr, %value, %comparator) {memory_scope = #spv.scope<Workgroup>, equal_semantics = #spv.memory_semantics<AcquireRelease>, unequal_semantics = #spv.memory_semantics<AcquireRelease>} : (!spv.ptr<i64, Workgroup>, i32, i32) -> (i32)
+  %0 = "spirv.AtomicCompareExchangeWeak"(%ptr, %value, %comparator) {memory_scope = #spirv.scope<Workgroup>, equal_semantics = #spirv.memory_semantics<AcquireRelease>, unequal_semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i64, Workgroup>, i32, i32) -> (i32)
   return %0: i32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicExchange
+// spirv.AtomicExchange
 //===----------------------------------------------------------------------===//
 
-func.func @atomic_exchange(%ptr: !spv.ptr<i32, Workgroup>, %value: i32) -> i32 {
-  // CHECK: spv.AtomicExchange "Workgroup" "Release" %{{.*}}, %{{.*}} : !spv.ptr<i32, Workgroup>
-  %0 = spv.AtomicExchange "Workgroup" "Release" %ptr, %value: !spv.ptr<i32, Workgroup>
+func.func @atomic_exchange(%ptr: !spirv.ptr<i32, Workgroup>, %value: i32) -> i32 {
+  // CHECK: spirv.AtomicExchange "Workgroup" "Release" %{{.*}}, %{{.*}} : !spirv.ptr<i32, Workgroup>
+  %0 = spirv.AtomicExchange "Workgroup" "Release" %ptr, %value: !spirv.ptr<i32, Workgroup>
   return %0: i32
 }
 
 // -----
 
-func.func @atomic_exchange(%ptr: !spv.ptr<i32, Workgroup>, %value: i64) -> i32 {
+func.func @atomic_exchange(%ptr: !spirv.ptr<i32, Workgroup>, %value: i64) -> i32 {
   // expected-error @+1 {{value operand must have the same type as the op result, but found 'i64' vs 'i32'}}
-  %0 = "spv.AtomicExchange"(%ptr, %value) {memory_scope = #spv.scope<Workgroup>, semantics = #spv.memory_semantics<AcquireRelease>} : (!spv.ptr<i32, Workgroup>, i64) -> (i32)
+  %0 = "spirv.AtomicExchange"(%ptr, %value) {memory_scope = #spirv.scope<Workgroup>, semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i32, Workgroup>, i64) -> (i32)
   return %0: i32
 }
 
 // -----
 
-func.func @atomic_exchange(%ptr: !spv.ptr<i64, Workgroup>, %value: i32) -> i32 {
+func.func @atomic_exchange(%ptr: !spirv.ptr<i64, Workgroup>, %value: i32) -> i32 {
   // expected-error @+1 {{pointer operand's pointee type must have the same as the op result type, but found 'i64' vs 'i32'}}
-  %0 = "spv.AtomicExchange"(%ptr, %value) {memory_scope = #spv.scope<Workgroup>, semantics = #spv.memory_semantics<AcquireRelease>} : (!spv.ptr<i64, Workgroup>, i32) -> (i32)
+  %0 = "spirv.AtomicExchange"(%ptr, %value) {memory_scope = #spirv.scope<Workgroup>, semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i64, Workgroup>, i32) -> (i32)
   return %0: i32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicIAdd
+// spirv.AtomicIAdd
 //===----------------------------------------------------------------------===//
 
-func.func @atomic_iadd(%ptr : !spv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
-  // CHECK: spv.AtomicIAdd "Workgroup" "None" %{{.*}}, %{{.*}} : !spv.ptr<i32, StorageBuffer>
-  %0 = spv.AtomicIAdd "Workgroup" "None" %ptr, %value : !spv.ptr<i32, StorageBuffer>
+func.func @atomic_iadd(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
+  // CHECK: spirv.AtomicIAdd "Workgroup" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+  %0 = spirv.AtomicIAdd "Workgroup" "None" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
   return %0 : i32
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicIDecrement
+// spirv.AtomicIDecrement
 //===----------------------------------------------------------------------===//
 
-func.func @atomic_idecrement(%ptr : !spv.ptr<i32, StorageBuffer>) -> i32 {
-  // CHECK: spv.AtomicIDecrement "Workgroup" "None" %{{.*}} : !spv.ptr<i32, StorageBuffer>
-  %0 = spv.AtomicIDecrement "Workgroup" "None" %ptr : !spv.ptr<i32, StorageBuffer>
+func.func @atomic_idecrement(%ptr : !spirv.ptr<i32, StorageBuffer>) -> i32 {
+  // CHECK: spirv.AtomicIDecrement "Workgroup" "None" %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+  %0 = spirv.AtomicIDecrement "Workgroup" "None" %ptr : !spirv.ptr<i32, StorageBuffer>
   return %0 : i32
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicIIncrement
+// spirv.AtomicIIncrement
 //===----------------------------------------------------------------------===//
 
-func.func @atomic_iincrement(%ptr : !spv.ptr<i32, StorageBuffer>) -> i32 {
-  // CHECK: spv.AtomicIIncrement "Workgroup" "None" %{{.*}} : !spv.ptr<i32, StorageBuffer>
-  %0 = spv.AtomicIIncrement "Workgroup" "None" %ptr : !spv.ptr<i32, StorageBuffer>
+func.func @atomic_iincrement(%ptr : !spirv.ptr<i32, StorageBuffer>) -> i32 {
+  // CHECK: spirv.AtomicIIncrement "Workgroup" "None" %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+  %0 = spirv.AtomicIIncrement "Workgroup" "None" %ptr : !spirv.ptr<i32, StorageBuffer>
   return %0 : i32
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicISub
+// spirv.AtomicISub
 //===----------------------------------------------------------------------===//
 
-func.func @atomic_isub(%ptr : !spv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
-  // CHECK: spv.AtomicISub "Workgroup" "None" %{{.*}}, %{{.*}} : !spv.ptr<i32, StorageBuffer>
-  %0 = spv.AtomicISub "Workgroup" "None" %ptr, %value : !spv.ptr<i32, StorageBuffer>
+func.func @atomic_isub(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
+  // CHECK: spirv.AtomicISub "Workgroup" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+  %0 = spirv.AtomicISub "Workgroup" "None" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
   return %0 : i32
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicOr
+// spirv.AtomicOr
 //===----------------------------------------------------------------------===//
 
-func.func @atomic_or(%ptr : !spv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
-  // CHECK: spv.AtomicOr "Workgroup" "None" %{{.*}}, %{{.*}} : !spv.ptr<i32, StorageBuffer>
-  %0 = spv.AtomicOr "Workgroup" "None" %ptr, %value : !spv.ptr<i32, StorageBuffer>
+func.func @atomic_or(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
+  // CHECK: spirv.AtomicOr "Workgroup" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+  %0 = spirv.AtomicOr "Workgroup" "None" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
   return %0 : i32
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicSMax
+// spirv.AtomicSMax
 //===----------------------------------------------------------------------===//
 
-func.func @atomic_smax(%ptr : !spv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
-  // CHECK: spv.AtomicSMax "Workgroup" "None" %{{.*}}, %{{.*}} : !spv.ptr<i32, StorageBuffer>
-  %0 = spv.AtomicSMax "Workgroup" "None" %ptr, %value : !spv.ptr<i32, StorageBuffer>
+func.func @atomic_smax(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
+  // CHECK: spirv.AtomicSMax "Workgroup" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+  %0 = spirv.AtomicSMax "Workgroup" "None" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
   return %0 : i32
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicSMin
+// spirv.AtomicSMin
 //===----------------------------------------------------------------------===//
 
-func.func @atomic_smin(%ptr : !spv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
-  // CHECK: spv.AtomicSMin "Workgroup" "None" %{{.*}}, %{{.*}} : !spv.ptr<i32, StorageBuffer>
-  %0 = spv.AtomicSMin "Workgroup" "None" %ptr, %value : !spv.ptr<i32, StorageBuffer>
+func.func @atomic_smin(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
+  // CHECK: spirv.AtomicSMin "Workgroup" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+  %0 = spirv.AtomicSMin "Workgroup" "None" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
   return %0 : i32
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicUMax
+// spirv.AtomicUMax
 //===----------------------------------------------------------------------===//
 
-func.func @atomic_umax(%ptr : !spv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
-  // CHECK: spv.AtomicUMax "Workgroup" "None" %{{.*}}, %{{.*}} : !spv.ptr<i32, StorageBuffer>
-  %0 = spv.AtomicUMax "Workgroup" "None" %ptr, %value : !spv.ptr<i32, StorageBuffer>
+func.func @atomic_umax(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
+  // CHECK: spirv.AtomicUMax "Workgroup" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+  %0 = spirv.AtomicUMax "Workgroup" "None" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
   return %0 : i32
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicUMin
+// spirv.AtomicUMin
 //===----------------------------------------------------------------------===//
 
-func.func @atomic_umin(%ptr : !spv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
-  // CHECK: spv.AtomicUMin "Workgroup" "None" %{{.*}}, %{{.*}} : !spv.ptr<i32, StorageBuffer>
-  %0 = spv.AtomicUMin "Workgroup" "None" %ptr, %value : !spv.ptr<i32, StorageBuffer>
+func.func @atomic_umin(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
+  // CHECK: spirv.AtomicUMin "Workgroup" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+  %0 = spirv.AtomicUMin "Workgroup" "None" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
   return %0 : i32
 }
 
 //===----------------------------------------------------------------------===//
-// spv.AtomicXor
+// spirv.AtomicXor
 //===----------------------------------------------------------------------===//
 
-func.func @atomic_xor(%ptr : !spv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
-  // CHECK: spv.AtomicXor "Workgroup" "None" %{{.*}}, %{{.*}} : !spv.ptr<i32, StorageBuffer>
-  %0 = spv.AtomicXor "Workgroup" "None" %ptr, %value : !spv.ptr<i32, StorageBuffer>
+func.func @atomic_xor(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : i32) -> i32 {
+  // CHECK: spirv.AtomicXor "Workgroup" "None" %{{.*}}, %{{.*}} : !spirv.ptr<i32, StorageBuffer>
+  %0 = spirv.AtomicXor "Workgroup" "None" %ptr, %value : !spirv.ptr<i32, StorageBuffer>
   return %0 : i32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.EXT.AtomicFAdd
+// spirv.EXT.AtomicFAdd
 //===----------------------------------------------------------------------===//
 
-func.func @atomic_fadd(%ptr : !spv.ptr<f32, StorageBuffer>, %value : f32) -> f32 {
-  // CHECK: spv.EXT.AtomicFAdd "Device" "None" %{{.*}}, %{{.*}} : !spv.ptr<f32, StorageBuffer>
-  %0 = spv.EXT.AtomicFAdd "Device" "None" %ptr, %value : !spv.ptr<f32, StorageBuffer>
+func.func @atomic_fadd(%ptr : !spirv.ptr<f32, StorageBuffer>, %value : f32) -> f32 {
+  // CHECK: spirv.EXT.AtomicFAdd "Device" "None" %{{.*}}, %{{.*}} : !spirv.ptr<f32, StorageBuffer>
+  %0 = spirv.EXT.AtomicFAdd "Device" "None" %ptr, %value : !spirv.ptr<f32, StorageBuffer>
   return %0 : f32
 }
 
 // -----
 
-func.func @atomic_fadd(%ptr : !spv.ptr<i32, StorageBuffer>, %value : f32) -> f32 {
+func.func @atomic_fadd(%ptr : !spirv.ptr<i32, StorageBuffer>, %value : f32) -> f32 {
   // expected-error @+1 {{pointer operand must point to an float value, found 'i32'}}
-  %0 = "spv.EXT.AtomicFAdd"(%ptr, %value) {memory_scope = #spv.scope<Workgroup>, semantics = #spv.memory_semantics<AcquireRelease>} : (!spv.ptr<i32, StorageBuffer>, f32) -> (f32)
+  %0 = "spirv.EXT.AtomicFAdd"(%ptr, %value) {memory_scope = #spirv.scope<Workgroup>, semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<i32, StorageBuffer>, f32) -> (f32)
   return %0 : f32
 }
 
 // -----
 
-func.func @atomic_fadd(%ptr : !spv.ptr<f32, StorageBuffer>, %value : f64) -> f64 {
+func.func @atomic_fadd(%ptr : !spirv.ptr<f32, StorageBuffer>, %value : f64) -> f64 {
   // expected-error @+1 {{expected value to have the same type as the pointer operand's pointee type 'f32', but found 'f64'}}
-  %0 = "spv.EXT.AtomicFAdd"(%ptr, %value) {memory_scope = #spv.scope<Device>, semantics = #spv.memory_semantics<AcquireRelease>} : (!spv.ptr<f32, StorageBuffer>, f64) -> (f64)
+  %0 = "spirv.EXT.AtomicFAdd"(%ptr, %value) {memory_scope = #spirv.scope<Device>, semantics = #spirv.memory_semantics<AcquireRelease>} : (!spirv.ptr<f32, StorageBuffer>, f64) -> (f64)
   return %0 : f64
 }
 
 // -----
 
-func.func @atomic_fadd(%ptr : !spv.ptr<f32, StorageBuffer>, %value : f32) -> f32 {
+func.func @atomic_fadd(%ptr : !spirv.ptr<f32, StorageBuffer>, %value : f32) -> f32 {
   // expected-error @+1 {{expected at most one of these four memory constraints to be set: `Acquire`, `Release`,`AcquireRelease` or `SequentiallyConsistent`}}
-  %0 = spv.EXT.AtomicFAdd "Device" "Acquire|Release" %ptr, %value : !spv.ptr<f32, StorageBuffer>
+  %0 = spirv.EXT.AtomicFAdd "Device" "Acquire|Release" %ptr, %value : !spirv.ptr<f32, StorageBuffer>
   return %0 : f32
 }

diff  --git a/mlir/test/Dialect/SPIRV/IR/availability.mlir b/mlir/test/Dialect/SPIRV/IR/availability.mlir
index 810fe53faa95..290e07de41d5 100644
--- a/mlir/test/Dialect/SPIRV/IR/availability.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/availability.mlir
@@ -6,17 +6,17 @@ func.func @iadd(%arg: i32) -> i32 {
   // CHECK: max version: v1.6
   // CHECK: extensions: [ ]
   // CHECK: capabilities: [ ]
-  %0 = spv.IAdd %arg, %arg: i32
+  %0 = spirv.IAdd %arg, %arg: i32
   return %0: i32
 }
 
 // CHECK: atomic_compare_exchange_weak
-func.func @atomic_compare_exchange_weak(%ptr: !spv.ptr<i32, Workgroup>, %value: i32, %comparator: i32) -> i32 {
+func.func @atomic_compare_exchange_weak(%ptr: !spirv.ptr<i32, Workgroup>, %value: i32, %comparator: i32) -> i32 {
   // CHECK: min version: v1.0
   // CHECK: max version: v1.3
   // CHECK: extensions: [ ]
   // CHECK: capabilities: [ [Kernel] ]
-  %0 = spv.AtomicCompareExchangeWeak "Workgroup" "Release" "Acquire" %ptr, %value, %comparator: !spv.ptr<i32, Workgroup>
+  %0 = spirv.AtomicCompareExchangeWeak "Workgroup" "Release" "Acquire" %ptr, %value, %comparator: !spirv.ptr<i32, Workgroup>
   return %0: i32
 }
 
@@ -26,26 +26,26 @@ func.func @subgroup_ballot(%predicate: i1) -> vector<4xi32> {
   // CHECK: max version: v1.6
   // CHECK: extensions: [ ]
   // CHECK: capabilities: [ [GroupNonUniformBallot] ]
-  %0 = spv.GroupNonUniformBallot <Workgroup> %predicate : vector<4xi32>
+  %0 = spirv.GroupNonUniformBallot <Workgroup> %predicate : vector<4xi32>
   return %0: vector<4xi32>
 }
 
 // CHECK-LABEL: module_logical_glsl450
 func.func @module_logical_glsl450() {
-  // CHECK: spv.module min version: v1.0
-  // CHECK: spv.module max version: v1.6
-  // CHECK: spv.module extensions: [ ]
-  // CHECK: spv.module capabilities: [ [Shader] ]
-  spv.module Logical GLSL450 { }
+  // CHECK: spirv.module min version: v1.0
+  // CHECK: spirv.module max version: v1.6
+  // CHECK: spirv.module extensions: [ ]
+  // CHECK: spirv.module capabilities: [ [Shader] ]
+  spirv.module Logical GLSL450 { }
   return
 }
 
 // CHECK-LABEL: module_physical_storage_buffer64_vulkan
 func.func @module_physical_storage_buffer64_vulkan() {
-  // CHECK: spv.module min version: v1.0
-  // CHECK: spv.module max version: v1.6
-  // CHECK: spv.module extensions: [ [SPV_EXT_physical_storage_buffer, SPV_KHR_physical_storage_buffer] [SPV_KHR_vulkan_memory_model] ]
-  // CHECK: spv.module capabilities: [ [PhysicalStorageBufferAddresses] [VulkanMemoryModel] ]
-  spv.module PhysicalStorageBuffer64 Vulkan { }
+  // CHECK: spirv.module min version: v1.0
+  // CHECK: spirv.module max version: v1.6
+  // CHECK: spirv.module extensions: [ [SPV_EXT_physical_storage_buffer, SPV_KHR_physical_storage_buffer] [SPV_KHR_vulkan_memory_model] ]
+  // CHECK: spirv.module capabilities: [ [PhysicalStorageBufferAddresses] [VulkanMemoryModel] ]
+  spirv.module PhysicalStorageBuffer64 Vulkan { }
   return
 }

diff  --git a/mlir/test/Dialect/SPIRV/IR/barrier-ops.mlir b/mlir/test/Dialect/SPIRV/IR/barrier-ops.mlir
index 931426f32848..0be0d0204b7c 100644
--- a/mlir/test/Dialect/SPIRV/IR/barrier-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/barrier-ops.mlir
@@ -1,12 +1,12 @@
 // RUN: mlir-opt -split-input-file -verify-diagnostics %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.ControlBarrier
+// spirv.ControlBarrier
 //===----------------------------------------------------------------------===//
 
 func.func @control_barrier_0() -> () {
-  // CHECK: spv.ControlBarrier <Workgroup>, <Device>, <Acquire|UniformMemory>
-  spv.ControlBarrier <Workgroup>, <Device>, <Acquire|UniformMemory>
+  // CHECK: spirv.ControlBarrier <Workgroup>, <Device>, <Acquire|UniformMemory>
+  spirv.ControlBarrier <Workgroup>, <Device>, <Acquire|UniformMemory>
   return
 }
 
@@ -15,7 +15,7 @@ func.func @control_barrier_0() -> () {
 func.func @control_barrier_1() -> () {
   // expected-error @+2 {{to be one of}}
   // expected-error @+1 {{failed to parse SPV_ScopeAttr}}
-  spv.ControlBarrier <Something>, <Device>, <Acquire|UniformMemory>
+  spirv.ControlBarrier <Something>, <Device>, <Acquire|UniformMemory>
   return
 }
 
@@ -23,20 +23,20 @@ func.func @control_barrier_1() -> () {
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.MemoryBarrier
+// spirv.MemoryBarrier
 //===----------------------------------------------------------------------===//
 
 func.func @memory_barrier_0() -> () {
-  // CHECK: spv.MemoryBarrier <Device>, <Acquire|UniformMemory>
-  spv.MemoryBarrier <Device>, <Acquire|UniformMemory>
+  // CHECK: spirv.MemoryBarrier <Device>, <Acquire|UniformMemory>
+  spirv.MemoryBarrier <Device>, <Acquire|UniformMemory>
   return
 }
 
 // -----
 
 func.func @memory_barrier_1() -> () {
-  // CHECK: spv.MemoryBarrier <Workgroup>, <Acquire>
-  spv.MemoryBarrier <Workgroup>, <Acquire>
+  // CHECK: spirv.MemoryBarrier <Workgroup>, <Acquire>
+  spirv.MemoryBarrier <Workgroup>, <Acquire>
   return
 }
 
@@ -44,7 +44,7 @@ func.func @memory_barrier_1() -> () {
 
 func.func @memory_barrier_2() -> () {
  // expected-error @+1 {{expected at most one of these four memory constraints to be set: `Acquire`, `Release`,`AcquireRelease` or `SequentiallyConsistent`}}
-  spv.MemoryBarrier <Device>, <Acquire|Release>
+  spirv.MemoryBarrier <Device>, <Acquire|Release>
   return
 }
 

diff  --git a/mlir/test/Dialect/SPIRV/IR/bit-ops.mlir b/mlir/test/Dialect/SPIRV/IR/bit-ops.mlir
index 519b17766ad9..eeaa607b5604 100644
--- a/mlir/test/Dialect/SPIRV/IR/bit-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/bit-ops.mlir
@@ -1,25 +1,25 @@
 // RUN: mlir-opt -split-input-file -verify-diagnostics %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.BitCount
+// spirv.BitCount
 //===----------------------------------------------------------------------===//
 
 func.func @bitcount(%arg: i32) -> i32 {
-  // CHECK: spv.BitCount {{%.*}} : i32
-  %0 = spv.BitCount %arg : i32
-  spv.ReturnValue %0 : i32
+  // CHECK: spirv.BitCount {{%.*}} : i32
+  %0 = spirv.BitCount %arg : i32
+  spirv.ReturnValue %0 : i32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.BitFieldInsert
+// spirv.BitFieldInsert
 //===----------------------------------------------------------------------===//
 
 func.func @bit_field_insert_vec(%base: vector<3xi32>, %insert: vector<3xi32>, %offset: i32, %count: i16) -> vector<3xi32> {
-  // CHECK: {{%.*}} = spv.BitFieldInsert {{%.*}}, {{%.*}}, {{%.*}}, {{%.*}} : vector<3xi32>, i32, i16
-  %0 = spv.BitFieldInsert %base, %insert, %offset, %count : vector<3xi32>, i32, i16
-  spv.ReturnValue %0 : vector<3xi32>
+  // CHECK: {{%.*}} = spirv.BitFieldInsert {{%.*}}, {{%.*}}, {{%.*}}, {{%.*}} : vector<3xi32>, i32, i16
+  %0 = spirv.BitFieldInsert %base, %insert, %offset, %count : vector<3xi32>, i32, i16
+  spirv.ReturnValue %0 : vector<3xi32>
 }
 
 // -----
@@ -30,67 +30,67 @@ func.func @bit_field_insert_invalid_insert_type(%base: vector<3xi32>, %insert: v
   // message. In final state the error should refer to mismatch in base and
   // insert.
   // expected-error @+1 {{type}}
-  %0 = "spv.BitFieldInsert" (%base, %insert, %offset, %count) : (vector<3xi32>, vector<2xi32>, i32, i16) -> vector<3xi32>
-  spv.ReturnValue %0 : vector<3xi32>
+  %0 = "spirv.BitFieldInsert" (%base, %insert, %offset, %count) : (vector<3xi32>, vector<2xi32>, i32, i16) -> vector<3xi32>
+  spirv.ReturnValue %0 : vector<3xi32>
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.BitFieldSExtract
+// spirv.BitFieldSExtract
 //===----------------------------------------------------------------------===//
 
 func.func @bit_field_s_extract_vec(%base: vector<3xi32>, %offset: i8, %count: i8) -> vector<3xi32> {
-  // CHECK: {{%.*}} = spv.BitFieldSExtract {{%.*}}, {{%.*}}, {{%.*}} : vector<3xi32>, i8, i8
-  %0 = spv.BitFieldSExtract %base, %offset, %count : vector<3xi32>, i8, i8
-  spv.ReturnValue %0 : vector<3xi32>
+  // CHECK: {{%.*}} = spirv.BitFieldSExtract {{%.*}}, {{%.*}}, {{%.*}} : vector<3xi32>, i8, i8
+  %0 = spirv.BitFieldSExtract %base, %offset, %count : vector<3xi32>, i8, i8
+  spirv.ReturnValue %0 : vector<3xi32>
 }
 
 //===----------------------------------------------------------------------===//
-// spv.BitFieldUExtract
+// spirv.BitFieldUExtract
 //===----------------------------------------------------------------------===//
 
 func.func @bit_field_u_extract_vec(%base: vector<3xi32>, %offset: i8, %count: i8) -> vector<3xi32> {
-  // CHECK: {{%.*}} = spv.BitFieldUExtract {{%.*}}, {{%.*}}, {{%.*}} : vector<3xi32>, i8, i8
-  %0 = spv.BitFieldUExtract %base, %offset, %count : vector<3xi32>, i8, i8
-  spv.ReturnValue %0 : vector<3xi32>
+  // CHECK: {{%.*}} = spirv.BitFieldUExtract {{%.*}}, {{%.*}}, {{%.*}} : vector<3xi32>, i8, i8
+  %0 = spirv.BitFieldUExtract %base, %offset, %count : vector<3xi32>, i8, i8
+  spirv.ReturnValue %0 : vector<3xi32>
 }
 
 // -----
 
 func.func @bit_field_u_extract_invalid_result_type(%base: vector<3xi32>, %offset: i32, %count: i16) -> vector<4xi32> {
   // expected-error @+1 {{failed to verify that all of {base, result} have same type}}
-  %0 = "spv.BitFieldUExtract" (%base, %offset, %count) : (vector<3xi32>, i32, i16) -> vector<4xi32>
-  spv.ReturnValue %0 : vector<4xi32>
+  %0 = "spirv.BitFieldUExtract" (%base, %offset, %count) : (vector<3xi32>, i32, i16) -> vector<4xi32>
+  spirv.ReturnValue %0 : vector<4xi32>
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.BitReverse
+// spirv.BitReverse
 //===----------------------------------------------------------------------===//
 
 func.func @bitreverse(%arg: i32) -> i32 {
-  // CHECK: spv.BitReverse {{%.*}} : i32
-  %0 = spv.BitReverse %arg : i32
-  spv.ReturnValue %0 : i32
+  // CHECK: spirv.BitReverse {{%.*}} : i32
+  %0 = spirv.BitReverse %arg : i32
+  spirv.ReturnValue %0 : i32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.BitwiseOr
+// spirv.BitwiseOr
 //===----------------------------------------------------------------------===//
 
 func.func @bitwise_or_scalar(%arg: i32) -> i32 {
-  // CHECK: spv.BitwiseOr
-  %0 = spv.BitwiseOr %arg, %arg : i32
+  // CHECK: spirv.BitwiseOr
+  %0 = spirv.BitwiseOr %arg, %arg : i32
   return %0 : i32
 }
 
 func.func @bitwise_or_vector(%arg: vector<4xi32>) -> vector<4xi32> {
-  // CHECK: spv.BitwiseOr
-  %0 = spv.BitwiseOr %arg, %arg : vector<4xi32>
+  // CHECK: spirv.BitwiseOr
+  %0 = spirv.BitwiseOr %arg, %arg : vector<4xi32>
   return %0 : vector<4xi32>
 }
 
@@ -98,25 +98,25 @@ func.func @bitwise_or_vector(%arg: vector<4xi32>) -> vector<4xi32> {
 
 func.func @bitwise_or_float(%arg0: f16, %arg1: f16) -> f16 {
   // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4}}
-  %0 = spv.BitwiseOr %arg0, %arg1 : f16
+  %0 = spirv.BitwiseOr %arg0, %arg1 : f16
   return %0 : f16
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.BitwiseXor
+// spirv.BitwiseXor
 //===----------------------------------------------------------------------===//
 
 func.func @bitwise_xor_scalar(%arg: i32) -> i32 {
-  // CHECK: spv.BitwiseXor
-  %0 = spv.BitwiseXor %arg, %arg : i32
+  // CHECK: spirv.BitwiseXor
+  %0 = spirv.BitwiseXor %arg, %arg : i32
   return %0 : i32
 }
 
 func.func @bitwise_xor_vector(%arg: vector<4xi32>) -> vector<4xi32> {
-  // CHECK: spv.BitwiseXor
-  %0 = spv.BitwiseXor %arg, %arg : vector<4xi32>
+  // CHECK: spirv.BitwiseXor
+  %0 = spirv.BitwiseXor %arg, %arg : vector<4xi32>
   return %0 : vector<4xi32>
 }
 
@@ -124,25 +124,25 @@ func.func @bitwise_xor_vector(%arg: vector<4xi32>) -> vector<4xi32> {
 
 func.func @bitwise_xor_float(%arg0: f16, %arg1: f16) -> f16 {
   // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4}}
-  %0 = spv.BitwiseXor %arg0, %arg1 : f16
+  %0 = spirv.BitwiseXor %arg0, %arg1 : f16
   return %0 : f16
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.BitwiseAnd
+// spirv.BitwiseAnd
 //===----------------------------------------------------------------------===//
 
 func.func @bitwise_and_scalar(%arg: i32) -> i32 {
-  // CHECK: spv.BitwiseAnd
-  %0 = spv.BitwiseAnd %arg, %arg : i32
+  // CHECK: spirv.BitwiseAnd
+  %0 = spirv.BitwiseAnd %arg, %arg : i32
   return %0 : i32
 }
 
 func.func @bitwise_and_vector(%arg: vector<4xi32>) -> vector<4xi32> {
-  // CHECK: spv.BitwiseAnd
-  %0 = spv.BitwiseAnd %arg, %arg : vector<4xi32>
+  // CHECK: spirv.BitwiseAnd
+  %0 = spirv.BitwiseAnd %arg, %arg : vector<4xi32>
   return %0 : vector<4xi32>
 }
 
@@ -150,62 +150,62 @@ func.func @bitwise_and_vector(%arg: vector<4xi32>) -> vector<4xi32> {
 
 func.func @bitwise_and_float(%arg0: f16, %arg1: f16) -> f16 {
   // expected-error @+1 {{operand #0 must be 8/16/32/64-bit integer or vector of 8/16/32/64-bit integer values of length 2/3/4}}
-  %0 = spv.BitwiseAnd %arg0, %arg1 : f16
+  %0 = spirv.BitwiseAnd %arg0, %arg1 : f16
   return %0 : f16
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.Not
+// spirv.Not
 //===----------------------------------------------------------------------===//
 
 func.func @not(%arg: i32) -> i32 {
-  // CHECK: spv.Not {{%.*}} : i32
-  %0 = spv.Not %arg : i32
-  spv.ReturnValue %0 : i32
+  // CHECK: spirv.Not {{%.*}} : i32
+  %0 = spirv.Not %arg : i32
+  spirv.ReturnValue %0 : i32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.ShiftLeftLogical
+// spirv.ShiftLeftLogical
 //===----------------------------------------------------------------------===//
 
 func.func @shift_left_logical(%arg0: i32, %arg1 : i16) -> i32 {
-  // CHECK: {{%.*}} = spv.ShiftLeftLogical {{%.*}}, {{%.*}} : i32, i16
-  %0 = spv.ShiftLeftLogical %arg0, %arg1: i32, i16
-  spv.ReturnValue %0 : i32
+  // CHECK: {{%.*}} = spirv.ShiftLeftLogical {{%.*}}, {{%.*}} : i32, i16
+  %0 = spirv.ShiftLeftLogical %arg0, %arg1: i32, i16
+  spirv.ReturnValue %0 : i32
 }
 
 // -----
 
 func.func @shift_left_logical_invalid_result_type(%arg0: i32, %arg1 : i16) -> i16 {
   // expected-error @+1 {{op failed to verify that all of {operand1, result} have same type}}
-  %0 = "spv.ShiftLeftLogical" (%arg0, %arg1) : (i32, i16) -> (i16)
-  spv.ReturnValue %0 : i16
+  %0 = "spirv.ShiftLeftLogical" (%arg0, %arg1) : (i32, i16) -> (i16)
+  spirv.ReturnValue %0 : i16
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.ShiftRightArithmetic
+// spirv.ShiftRightArithmetic
 //===----------------------------------------------------------------------===//
 
 func.func @shift_right_arithmetic(%arg0: vector<4xi32>, %arg1 : vector<4xi8>) -> vector<4xi32> {
-  // CHECK: {{%.*}} = spv.ShiftRightArithmetic {{%.*}}, {{%.*}} : vector<4xi32>, vector<4xi8>
-  %0 = spv.ShiftRightArithmetic %arg0, %arg1: vector<4xi32>, vector<4xi8>
-  spv.ReturnValue %0 : vector<4xi32>
+  // CHECK: {{%.*}} = spirv.ShiftRightArithmetic {{%.*}}, {{%.*}} : vector<4xi32>, vector<4xi8>
+  %0 = spirv.ShiftRightArithmetic %arg0, %arg1: vector<4xi32>, vector<4xi8>
+  spirv.ReturnValue %0 : vector<4xi32>
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.ShiftRightLogical
+// spirv.ShiftRightLogical
 //===----------------------------------------------------------------------===//
 
 func.func @shift_right_logical(%arg0: vector<2xi32>, %arg1 : vector<2xi8>) -> vector<2xi32> {
-  // CHECK: {{%.*}} = spv.ShiftRightLogical {{%.*}}, {{%.*}} : vector<2xi32>, vector<2xi8>
-  %0 = spv.ShiftRightLogical %arg0, %arg1: vector<2xi32>, vector<2xi8>
-  spv.ReturnValue %0 : vector<2xi32>
+  // CHECK: {{%.*}} = spirv.ShiftRightLogical {{%.*}}, {{%.*}} : vector<2xi32>, vector<2xi8>
+  %0 = spirv.ShiftRightLogical %arg0, %arg1: vector<2xi32>, vector<2xi8>
+  spirv.ReturnValue %0 : vector<2xi32>
 }

diff --git a/mlir/test/Dialect/SPIRV/IR/cast-ops.mlir b/mlir/test/Dialect/SPIRV/IR/cast-ops.mlir
index 0f24d871a699..4a7bd603b14d 100644
--- a/mlir/test/Dialect/SPIRV/IR/cast-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/cast-ops.mlir
@@ -1,42 +1,42 @@
 // RUN: mlir-opt -split-input-file -verify-diagnostics %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.Bitcast
+// spirv.Bitcast
 //===----------------------------------------------------------------------===//
 
 func.func @cast1(%arg0 : f32) {
-  // CHECK: {{%.*}} = spv.Bitcast {{%.*}} : f32 to i32
-  %0 = spv.Bitcast %arg0 : f32 to i32
+  // CHECK: {{%.*}} = spirv.Bitcast {{%.*}} : f32 to i32
+  %0 = spirv.Bitcast %arg0 : f32 to i32
   return
 }
 
 func.func @cast2(%arg0 : vector<2xf32>) {
-  // CHECK: {{%.*}} = spv.Bitcast {{%.*}} : vector<2xf32> to vector<2xi32>
-  %0 = spv.Bitcast %arg0 : vector<2xf32> to vector<2xi32>
+  // CHECK: {{%.*}} = spirv.Bitcast {{%.*}} : vector<2xf32> to vector<2xi32>
+  %0 = spirv.Bitcast %arg0 : vector<2xf32> to vector<2xi32>
   return
 }
 
 func.func @cast3(%arg0 : vector<2xf32>) {
-  // CHECK: {{%.*}} = spv.Bitcast {{%.*}} : vector<2xf32> to i64
-  %0 = spv.Bitcast %arg0 : vector<2xf32> to i64
+  // CHECK: {{%.*}} = spirv.Bitcast {{%.*}} : vector<2xf32> to i64
+  %0 = spirv.Bitcast %arg0 : vector<2xf32> to i64
   return
 }
 
-func.func @cast4(%arg0 : !spv.ptr<f32, Function>) {
-  // CHECK: {{%.*}} = spv.Bitcast {{%.*}} : !spv.ptr<f32, Function> to !spv.ptr<i32, Function>
-  %0 = spv.Bitcast %arg0 : !spv.ptr<f32, Function> to !spv.ptr<i32, Function>
+func.func @cast4(%arg0 : !spirv.ptr<f32, Function>) {
+  // CHECK: {{%.*}} = spirv.Bitcast {{%.*}} : !spirv.ptr<f32, Function> to !spirv.ptr<i32, Function>
+  %0 = spirv.Bitcast %arg0 : !spirv.ptr<f32, Function> to !spirv.ptr<i32, Function>
   return
 }
 
-func.func @cast5(%arg0 : !spv.ptr<f32, Function>) {
-  // CHECK: {{%.*}} = spv.Bitcast {{%.*}} : !spv.ptr<f32, Function> to !spv.ptr<vector<2xi32>, Function>
-  %0 = spv.Bitcast %arg0 : !spv.ptr<f32, Function> to !spv.ptr<vector<2xi32>, Function>
+func.func @cast5(%arg0 : !spirv.ptr<f32, Function>) {
+  // CHECK: {{%.*}} = spirv.Bitcast {{%.*}} : !spirv.ptr<f32, Function> to !spirv.ptr<vector<2xi32>, Function>
+  %0 = spirv.Bitcast %arg0 : !spirv.ptr<f32, Function> to !spirv.ptr<vector<2xi32>, Function>
   return
 }
 
 func.func @cast6(%arg0 : vector<4xf32>) {
-  // CHECK: {{%.*}} = spv.Bitcast {{%.*}} : vector<4xf32> to vector<2xi64>
-  %0 = spv.Bitcast %arg0 : vector<4xf32> to vector<2xi64>
+  // CHECK: {{%.*}} = spirv.Bitcast {{%.*}} : vector<4xf32> to vector<2xi64>
+  %0 = spirv.Bitcast %arg0 : vector<4xf32> to vector<2xi64>
   return
 }
 
@@ -44,7 +44,7 @@ func.func @cast6(%arg0 : vector<4xf32>) {
 
 func.func @cast1(%arg0 : f32) {
   // expected-error @+1 {{result type must be different from operand type}}
-  %0 = spv.Bitcast %arg0 : f32 to f32
+  %0 = spirv.Bitcast %arg0 : f32 to f32
   return
 }
 
@@ -52,7 +52,7 @@ func.func @cast1(%arg0 : f32) {
 
 func.func @cast1(%arg0 : f32) {
   // expected-error @+1 {{mismatch in result type bitwidth 64 and operand type bitwidth 32}}
-  %0 = spv.Bitcast %arg0 : f32 to i64
+  %0 = spirv.Bitcast %arg0 : f32 to i64
   return
 }
 
@@ -60,15 +60,15 @@ func.func @cast1(%arg0 : f32) {
 
 func.func @cast1(%arg0 : vector<2xf32>) {
   // expected-error @+1 {{mismatch in result type bitwidth 96 and operand type bitwidth 64}}
-  %0 = spv.Bitcast %arg0 : vector<2xf32> to vector<3xf32>
+  %0 = spirv.Bitcast %arg0 : vector<2xf32> to vector<3xf32>
   return
 }
 
 // -----
 
-func.func @cast3(%arg0 : !spv.ptr<f32, Function>) {
+func.func @cast3(%arg0 : !spirv.ptr<f32, Function>) {
   // expected-error @+1 {{unhandled bit cast conversion from pointer type to non-pointer type}}
-  %0 = spv.Bitcast %arg0 : !spv.ptr<f32, Function> to i64
+  %0 = spirv.Bitcast %arg0 : !spirv.ptr<f32, Function> to i64
   return
 }
 
@@ -76,290 +76,290 @@ func.func @cast3(%arg0 : !spv.ptr<f32, Function>) {
 
 func.func @cast3(%arg0 : i64) {
   // expected-error @+1 {{unhandled bit cast conversion from non-pointer type to pointer type}}
-  %0 = spv.Bitcast %arg0 : i64 to !spv.ptr<f32, Function>
+  %0 = spirv.Bitcast %arg0 : i64 to !spirv.ptr<f32, Function>
   return
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.ConvertFToS
+// spirv.ConvertFToS
 //===----------------------------------------------------------------------===//
 
 func.func @convert_f_to_s_scalar(%arg0 : f32) -> i32 {
-  // CHECK: {{%.*}} = spv.ConvertFToS {{%.*}} : f32 to i32
-  %0 = spv.ConvertFToS %arg0 : f32 to i32
-  spv.ReturnValue %0 : i32
+  // CHECK: {{%.*}} = spirv.ConvertFToS {{%.*}} : f32 to i32
+  %0 = spirv.ConvertFToS %arg0 : f32 to i32
+  spirv.ReturnValue %0 : i32
 }
 
 // -----
 
 func.func @convert_f64_to_s32_scalar(%arg0 : f64) -> i32 {
-  // CHECK: {{%.*}} = spv.ConvertFToS {{%.*}} : f64 to i32
-  %0 = spv.ConvertFToS %arg0 : f64 to i32
-  spv.ReturnValue %0 : i32
+  // CHECK: {{%.*}} = spirv.ConvertFToS {{%.*}} : f64 to i32
+  %0 = spirv.ConvertFToS %arg0 : f64 to i32
+  spirv.ReturnValue %0 : i32
 }
 
 // -----
 
 func.func @convert_f_to_s_vector(%arg0 : vector<3xf32>) -> vector<3xi32> {
-  // CHECK: {{%.*}} = spv.ConvertFToS {{%.*}} : vector<3xf32> to vector<3xi32>
-  %0 = spv.ConvertFToS %arg0 : vector<3xf32> to vector<3xi32>
-  spv.ReturnValue %0 : vector<3xi32>
+  // CHECK: {{%.*}} = spirv.ConvertFToS {{%.*}} : vector<3xf32> to vector<3xi32>
+  %0 = spirv.ConvertFToS %arg0 : vector<3xf32> to vector<3xi32>
+  spirv.ReturnValue %0 : vector<3xi32>
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.ConvertFToU
+// spirv.ConvertFToU
 //===----------------------------------------------------------------------===//
 
 func.func @convert_f_to_u_scalar(%arg0 : f32) -> i32 {
-  // CHECK: {{%.*}} = spv.ConvertFToU {{%.*}} : f32 to i32
-  %0 = spv.ConvertFToU %arg0 : f32 to i32
-  spv.ReturnValue %0 : i32
+  // CHECK: {{%.*}} = spirv.ConvertFToU {{%.*}} : f32 to i32
+  %0 = spirv.ConvertFToU %arg0 : f32 to i32
+  spirv.ReturnValue %0 : i32
 }
 
 // -----
 
 func.func @convert_f64_to_u32_scalar(%arg0 : f64) -> i32 {
-  // CHECK: {{%.*}} = spv.ConvertFToU {{%.*}} : f64 to i32
-  %0 = spv.ConvertFToU %arg0 : f64 to i32
-  spv.ReturnValue %0 : i32
+  // CHECK: {{%.*}} = spirv.ConvertFToU {{%.*}} : f64 to i32
+  %0 = spirv.ConvertFToU %arg0 : f64 to i32
+  spirv.ReturnValue %0 : i32
 }
 
 // -----
 
 func.func @convert_f_to_u_vector(%arg0 : vector<3xf32>) -> vector<3xi32> {
-  // CHECK: {{%.*}} = spv.ConvertFToU {{%.*}} : vector<3xf32> to vector<3xi32>
-  %0 = spv.ConvertFToU %arg0 : vector<3xf32> to vector<3xi32>
-  spv.ReturnValue %0 : vector<3xi32>
+  // CHECK: {{%.*}} = spirv.ConvertFToU {{%.*}} : vector<3xf32> to vector<3xi32>
+  %0 = spirv.ConvertFToU %arg0 : vector<3xf32> to vector<3xi32>
+  spirv.ReturnValue %0 : vector<3xi32>
 }
 
 // -----
 
-func.func @convert_f_to_u_coopmatrix(%arg0 : !spv.coopmatrix<8x16xf32, Subgroup>) {
-  // CHECK: {{%.*}} = spv.ConvertFToU {{%.*}} : !spv.coopmatrix<8x16xf32, Subgroup> to !spv.coopmatrix<8x16xi32, Subgroup>
-  %0 = spv.ConvertFToU %arg0 : !spv.coopmatrix<8x16xf32, Subgroup> to !spv.coopmatrix<8x16xi32, Subgroup>
-  spv.Return
+func.func @convert_f_to_u_coopmatrix(%arg0 : !spirv.coopmatrix<8x16xf32, Subgroup>) {
+  // CHECK: {{%.*}} = spirv.ConvertFToU {{%.*}} : !spirv.coopmatrix<8x16xf32, Subgroup> to !spirv.coopmatrix<8x16xi32, Subgroup>
+  %0 = spirv.ConvertFToU %arg0 : !spirv.coopmatrix<8x16xf32, Subgroup> to !spirv.coopmatrix<8x16xi32, Subgroup>
+  spirv.Return
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.ConvertSToF
+// spirv.ConvertSToF
 //===----------------------------------------------------------------------===//
 
 func.func @convert_s_to_f_scalar(%arg0 : i32) -> f32 {
-  // CHECK: {{%.*}} = spv.ConvertSToF {{%.*}} : i32 to f32
-  %0 = spv.ConvertSToF %arg0 : i32 to f32
-  spv.ReturnValue %0 : f32
+  // CHECK: {{%.*}} = spirv.ConvertSToF {{%.*}} : i32 to f32
+  %0 = spirv.ConvertSToF %arg0 : i32 to f32
+  spirv.ReturnValue %0 : f32
 }
 
 // -----
 
 func.func @convert_s64_to_f32_scalar(%arg0 : i64) -> f32 {
-  // CHECK: {{%.*}} = spv.ConvertSToF {{%.*}} : i64 to f32
-  %0 = spv.ConvertSToF %arg0 : i64 to f32
-  spv.ReturnValue %0 : f32
+  // CHECK: {{%.*}} = spirv.ConvertSToF {{%.*}} : i64 to f32
+  %0 = spirv.ConvertSToF %arg0 : i64 to f32
+  spirv.ReturnValue %0 : f32
 }
 
 // -----
 
 func.func @convert_s_to_f_vector(%arg0 : vector<3xi32>) -> vector<3xf32> {
-  // CHECK: {{%.*}} = spv.ConvertSToF {{%.*}} : vector<3xi32> to vector<3xf32>
-  %0 = spv.ConvertSToF %arg0 : vector<3xi32> to vector<3xf32>
-  spv.ReturnValue %0 : vector<3xf32>
+  // CHECK: {{%.*}} = spirv.ConvertSToF {{%.*}} : vector<3xi32> to vector<3xf32>
+  %0 = spirv.ConvertSToF %arg0 : vector<3xi32> to vector<3xf32>
+  spirv.ReturnValue %0 : vector<3xf32>
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.ConvertUToF
+// spirv.ConvertUToF
 //===----------------------------------------------------------------------===//
 
 func.func @convert_u_to_f_scalar(%arg0 : i32) -> f32 {
-  // CHECK: {{%.*}} = spv.ConvertUToF {{%.*}} : i32 to f32
-  %0 = spv.ConvertUToF %arg0 : i32 to f32
-  spv.ReturnValue %0 : f32
+  // CHECK: {{%.*}} = spirv.ConvertUToF {{%.*}} : i32 to f32
+  %0 = spirv.ConvertUToF %arg0 : i32 to f32
+  spirv.ReturnValue %0 : f32
 }
 
 // -----
 
 func.func @convert_u64_to_f32_scalar(%arg0 : i64) -> f32 {
-  // CHECK: {{%.*}} = spv.ConvertUToF {{%.*}} : i64 to f32
-  %0 = spv.ConvertUToF %arg0 : i64 to f32
-  spv.ReturnValue %0 : f32
+  // CHECK: {{%.*}} = spirv.ConvertUToF {{%.*}} : i64 to f32
+  %0 = spirv.ConvertUToF %arg0 : i64 to f32
+  spirv.ReturnValue %0 : f32
 }
 
 // -----
 
 func.func @convert_u_to_f_vector(%arg0 : vector<3xi32>) -> vector<3xf32> {
-  // CHECK: {{%.*}} = spv.ConvertUToF {{%.*}} : vector<3xi32> to vector<3xf32>
-  %0 = spv.ConvertUToF %arg0 : vector<3xi32> to vector<3xf32>
-  spv.ReturnValue %0 : vector<3xf32>
+  // CHECK: {{%.*}} = spirv.ConvertUToF {{%.*}} : vector<3xi32> to vector<3xf32>
+  %0 = spirv.ConvertUToF %arg0 : vector<3xi32> to vector<3xf32>
+  spirv.ReturnValue %0 : vector<3xf32>
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.FConvert
+// spirv.FConvert
 //===----------------------------------------------------------------------===//
 
 func.func @f_convert_scalar(%arg0 : f32) -> f64 {
-  // CHECK: {{%.*}} = spv.FConvert {{%.*}} : f32 to f64
-  %0 = spv.FConvert %arg0 : f32 to f64
-  spv.ReturnValue %0 : f64
+  // CHECK: {{%.*}} = spirv.FConvert {{%.*}} : f32 to f64
+  %0 = spirv.FConvert %arg0 : f32 to f64
+  spirv.ReturnValue %0 : f64
 }
 
 // -----
 
 func.func @f_convert_vector(%arg0 : vector<3xf32>) -> vector<3xf64> {
-  // CHECK: {{%.*}} = spv.FConvert {{%.*}} : vector<3xf32> to vector<3xf64>
-  %0 = spv.FConvert %arg0 : vector<3xf32> to vector<3xf64>
-  spv.ReturnValue %0 : vector<3xf64>
+  // CHECK: {{%.*}} = spirv.FConvert {{%.*}} : vector<3xf32> to vector<3xf64>
+  %0 = spirv.FConvert %arg0 : vector<3xf32> to vector<3xf64>
+  spirv.ReturnValue %0 : vector<3xf64>
 }
 
 // -----
 
-func.func @f_convert_coop_matrix(%arg0 : !spv.coopmatrix<8x16xf32, Subgroup>) {
-  // CHECK: {{%.*}} = spv.FConvert {{%.*}} : !spv.coopmatrix<8x16xf32, Subgroup> to !spv.coopmatrix<8x16xf64, Subgroup>
-  %0 = spv.FConvert %arg0 : !spv.coopmatrix<8x16xf32, Subgroup> to !spv.coopmatrix<8x16xf64, Subgroup>
-  spv.Return
+func.func @f_convert_coop_matrix(%arg0 : !spirv.coopmatrix<8x16xf32, Subgroup>) {
+  // CHECK: {{%.*}} = spirv.FConvert {{%.*}} : !spirv.coopmatrix<8x16xf32, Subgroup> to !spirv.coopmatrix<8x16xf64, Subgroup>
+  %0 = spirv.FConvert %arg0 : !spirv.coopmatrix<8x16xf32, Subgroup> to !spirv.coopmatrix<8x16xf64, Subgroup>
+  spirv.Return
 }
 
 // -----
 
 func.func @f_convert_vector(%arg0 : f32) -> f32 {
   // expected-error @+1 {{expected the different bit widths for operand type and result type, but provided 'f32' and 'f32'}}
-  %0 = spv.FConvert %arg0 : f32 to f32
-  spv.ReturnValue %0 : f32
+  %0 = spirv.FConvert %arg0 : f32 to f32
+  spirv.ReturnValue %0 : f32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.SConvert
+// spirv.SConvert
 //===----------------------------------------------------------------------===//
 
 func.func @s_convert_scalar(%arg0 : i32) -> i64 {
-  // CHECK: {{%.*}} = spv.SConvert {{%.*}} : i32 to i64
-  %0 = spv.SConvert %arg0 : i32 to i64
-  spv.ReturnValue %0 : i64
+  // CHECK: {{%.*}} = spirv.SConvert {{%.*}} : i32 to i64
+  %0 = spirv.SConvert %arg0 : i32 to i64
+  spirv.ReturnValue %0 : i64
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.UConvert
+// spirv.UConvert
 //===----------------------------------------------------------------------===//
 
 func.func @u_convert_scalar(%arg0 : i32) -> i64 {
-  // CHECK: {{%.*}} = spv.UConvert {{%.*}} : i32 to i64
-  %0 = spv.UConvert %arg0 : i32 to i64
-  spv.ReturnValue %0 : i64
+  // CHECK: {{%.*}} = spirv.UConvert {{%.*}} : i32 to i64
+  %0 = spirv.UConvert %arg0 : i32 to i64
+  spirv.ReturnValue %0 : i64
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.PtrCastToGeneric
+// spirv.PtrCastToGeneric
 //===----------------------------------------------------------------------===//
 
-func.func @ptrcasttogeneric1(%arg0 : !spv.ptr<f32, CrossWorkgroup>) {
-  // CHECK: {{%.*}} = spv.PtrCastToGeneric {{%.*}} : !spv.ptr<f32, CrossWorkgroup> to !spv.ptr<f32, Generic>
-  %0 = spv.PtrCastToGeneric %arg0 : !spv.ptr<f32, CrossWorkgroup> to !spv.ptr<f32, Generic>
+func.func @ptrcasttogeneric1(%arg0 : !spirv.ptr<f32, CrossWorkgroup>) {
+  // CHECK: {{%.*}} = spirv.PtrCastToGeneric {{%.*}} : !spirv.ptr<f32, CrossWorkgroup> to !spirv.ptr<f32, Generic>
+  %0 = spirv.PtrCastToGeneric %arg0 : !spirv.ptr<f32, CrossWorkgroup> to !spirv.ptr<f32, Generic>
   return
 }
 // -----
 
-func.func @ptrcasttogeneric2(%arg0 : !spv.ptr<f32, StorageBuffer>) {
+func.func @ptrcasttogeneric2(%arg0 : !spirv.ptr<f32, StorageBuffer>) {
   // expected-error @+1 {{pointer must point to the Workgroup, CrossWorkgroup, or Function Storage Class}}
-  %0 = spv.PtrCastToGeneric %arg0 : !spv.ptr<f32, StorageBuffer> to !spv.ptr<f32, Generic>
+  %0 = spirv.PtrCastToGeneric %arg0 : !spirv.ptr<f32, StorageBuffer> to !spirv.ptr<f32, Generic>
   return
 }
 
 // -----
 
-func.func @ptrcasttogeneric3(%arg0 : !spv.ptr<f32, CrossWorkgroup>) {
+func.func @ptrcasttogeneric3(%arg0 : !spirv.ptr<f32, CrossWorkgroup>) {
   // expected-error @+1 {{result type must be of storage class Generic}}
-  %0 = spv.PtrCastToGeneric %arg0 : !spv.ptr<f32, CrossWorkgroup> to !spv.ptr<f32, Uniform>
+  %0 = spirv.PtrCastToGeneric %arg0 : !spirv.ptr<f32, CrossWorkgroup> to !spirv.ptr<f32, Uniform>
   return
 }
 
 // -----
 
-func.func @ptrcasttogeneric4(%arg0 : !spv.ptr<f32, CrossWorkgroup>) {
+func.func @ptrcasttogeneric4(%arg0 : !spirv.ptr<f32, CrossWorkgroup>) {
   // expected-error @+1 {{pointee type must have the same as the op result type}}
-  %0 = spv.PtrCastToGeneric %arg0 : !spv.ptr<f32, CrossWorkgroup> to !spv.ptr<vector<2xi32>, Generic>
+  %0 = spirv.PtrCastToGeneric %arg0 : !spirv.ptr<f32, CrossWorkgroup> to !spirv.ptr<vector<2xi32>, Generic>
   return
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.GenericCastToPtr
+// spirv.GenericCastToPtr
 //===----------------------------------------------------------------------===//
 
-func.func @genericcasttoptr1(%arg0 : !spv.ptr<vector<2xi32>, Generic>) {
-  // CHECK: {{%.*}} = spv.GenericCastToPtr {{%.*}} : !spv.ptr<vector<2xi32>, Generic> to !spv.ptr<vector<2xi32>, CrossWorkgroup>
-  %0 = spv.GenericCastToPtr %arg0 : !spv.ptr<vector<2xi32>, Generic> to !spv.ptr<vector<2xi32>, CrossWorkgroup>
+func.func @genericcasttoptr1(%arg0 : !spirv.ptr<vector<2xi32>, Generic>) {
+  // CHECK: {{%.*}} = spirv.GenericCastToPtr {{%.*}} : !spirv.ptr<vector<2xi32>, Generic> to !spirv.ptr<vector<2xi32>, CrossWorkgroup>
+  %0 = spirv.GenericCastToPtr %arg0 : !spirv.ptr<vector<2xi32>, Generic> to !spirv.ptr<vector<2xi32>, CrossWorkgroup>
   return
 }
 // -----
 
-func.func @genericcasttoptr2(%arg0 : !spv.ptr<f32, Uniform>) {
+func.func @genericcasttoptr2(%arg0 : !spirv.ptr<f32, Uniform>) {
   // expected-error @+1 {{pointer type must be of storage class Generic}}
-  %0 = spv.GenericCastToPtr %arg0 : !spv.ptr<f32, Uniform> to !spv.ptr<f32, Workgroup>
+  %0 = spirv.GenericCastToPtr %arg0 : !spirv.ptr<f32, Uniform> to !spirv.ptr<f32, Workgroup>
   return
 }
 
 // -----
 
-func.func @genericcasttoptr3(%arg0 : !spv.ptr<f32, Generic>) {
+func.func @genericcasttoptr3(%arg0 : !spirv.ptr<f32, Generic>) {
   // expected-error @+1 {{result must point to the Workgroup, CrossWorkgroup, or Function Storage Class}}
-  %0 = spv.GenericCastToPtr %arg0 : !spv.ptr<f32, Generic> to !spv.ptr<f32, Uniform>
+  %0 = spirv.GenericCastToPtr %arg0 : !spirv.ptr<f32, Generic> to !spirv.ptr<f32, Uniform>
   return
 }
 
 // -----
 
-func.func @genericcasttoptr4(%arg0 : !spv.ptr<f32, Generic>) {
+func.func @genericcasttoptr4(%arg0 : !spirv.ptr<f32, Generic>) {
   // expected-error @+1 {{pointee type must have the same as the op result type}}
-  %0 = spv.GenericCastToPtr %arg0 : !spv.ptr<f32, Generic> to !spv.ptr<vector<2xi32>, Workgroup>
+  %0 = spirv.GenericCastToPtr %arg0 : !spirv.ptr<f32, Generic> to !spirv.ptr<vector<2xi32>, Workgroup>
   return
 }
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.GenericCastToPtrExplicit
+// spirv.GenericCastToPtrExplicit
 //===----------------------------------------------------------------------===//
 
-func.func @genericcasttoptrexplicit1(%arg0 : !spv.ptr<vector<2xi32>, Generic>) {
-  // CHECK: {{%.*}} = spv.GenericCastToPtrExplicit {{%.*}} : !spv.ptr<vector<2xi32>, Generic> to !spv.ptr<vector<2xi32>, CrossWorkgroup>
-  %0 = spv.GenericCastToPtrExplicit %arg0 : !spv.ptr<vector<2xi32>, Generic> to !spv.ptr<vector<2xi32>, CrossWorkgroup>
+func.func @genericcasttoptrexplicit1(%arg0 : !spirv.ptr<vector<2xi32>, Generic>) {
+  // CHECK: {{%.*}} = spirv.GenericCastToPtrExplicit {{%.*}} : !spirv.ptr<vector<2xi32>, Generic> to !spirv.ptr<vector<2xi32>, CrossWorkgroup>
+  %0 = spirv.GenericCastToPtrExplicit %arg0 : !spirv.ptr<vector<2xi32>, Generic> to !spirv.ptr<vector<2xi32>, CrossWorkgroup>
   return
 }
 // -----
 
-func.func @genericcasttoptrexplicit2(%arg0 : !spv.ptr<f32, Uniform>) {
+func.func @genericcasttoptrexplicit2(%arg0 : !spirv.ptr<f32, Uniform>) {
   // expected-error @+1 {{pointer type must be of storage class Generic}}
-  %0 = spv.GenericCastToPtrExplicit %arg0 : !spv.ptr<f32, Uniform> to !spv.ptr<f32, Workgroup>
+  %0 = spirv.GenericCastToPtrExplicit %arg0 : !spirv.ptr<f32, Uniform> to !spirv.ptr<f32, Workgroup>
   return
 }
 
 // -----
 
-func.func @genericcasttoptrexplicit3(%arg0 : !spv.ptr<f32, Generic>) {
+func.func @genericcasttoptrexplicit3(%arg0 : !spirv.ptr<f32, Generic>) {
   // expected-error @+1 {{result must point to the Workgroup, CrossWorkgroup, or Function Storage Class}}
-  %0 = spv.GenericCastToPtrExplicit %arg0 : !spv.ptr<f32, Generic> to !spv.ptr<f32, Uniform>
+  %0 = spirv.GenericCastToPtrExplicit %arg0 : !spirv.ptr<f32, Generic> to !spirv.ptr<f32, Uniform>
   return
 }
 
 // -----
 
-func.func @genericcasttoptrexplicit4(%arg0 : !spv.ptr<f32, Generic>) {
+func.func @genericcasttoptrexplicit4(%arg0 : !spirv.ptr<f32, Generic>) {
   // expected-error @+1 {{pointee type must have the same as the op result type}}
-  %0 = spv.GenericCastToPtrExplicit %arg0 : !spv.ptr<f32, Generic> to !spv.ptr<vector<2xi32>, Workgroup>
+  %0 = spirv.GenericCastToPtrExplicit %arg0 : !spirv.ptr<f32, Generic> to !spirv.ptr<vector<2xi32>, Workgroup>
   return
 }

diff --git a/mlir/test/Dialect/SPIRV/IR/composite-ops.mlir b/mlir/test/Dialect/SPIRV/IR/composite-ops.mlir
index fc9ba780c381..e363d14b29ad 100644
--- a/mlir/test/Dialect/SPIRV/IR/composite-ops.mlir
+++ b/mlir/test/Dialect/SPIRV/IR/composite-ops.mlir
@@ -1,45 +1,45 @@
 // RUN: mlir-opt -split-input-file -verify-diagnostics %s | FileCheck %s
 
 //===----------------------------------------------------------------------===//
-// spv.CompositeConstruct
+// spirv.CompositeConstruct
 //===----------------------------------------------------------------------===//
 
 func.func @composite_construct_vector(%arg0: f32, %arg1: f32, %arg2 : f32) -> vector<3xf32> {
-  // CHECK: spv.CompositeConstruct {{%.*}}, {{%.*}}, {{%.*}} : (f32, f32, f32) -> vector<3xf32>
-  %0 = spv.CompositeConstruct %arg0, %arg1, %arg2 : (f32, f32, f32) -> vector<3xf32>
+  // CHECK: spirv.CompositeConstruct {{%.*}}, {{%.*}}, {{%.*}} : (f32, f32, f32) -> vector<3xf32>
+  %0 = spirv.CompositeConstruct %arg0, %arg1, %arg2 : (f32, f32, f32) -> vector<3xf32>
   return %0: vector<3xf32>
 }
 
 // -----
 
-func.func @composite_construct_struct(%arg0: vector<3xf32>, %arg1: !spv.array<4xf32>, %arg2 : !spv.struct<(f32)>) -> !spv.struct<(vector<3xf32>, !spv.array<4xf32>, !spv.struct<(f32)>)> {
-  // CHECK: spv.CompositeConstruct
-  %0 = spv.CompositeConstruct %arg0, %arg1, %arg2 : (vector<3xf32>, !spv.array<4xf32>, !spv.struct<(f32)>) -> !spv.struct<(vector<3xf32>, !spv.array<4xf32>, !spv.struct<(f32)>)>
-  return %0: !spv.struct<(vector<3xf32>, !spv.array<4xf32>, !spv.struct<(f32)>)>
+func.func @composite_construct_struct(%arg0: vector<3xf32>, %arg1: !spirv.array<4xf32>, %arg2 : !spirv.struct<(f32)>) -> !spirv.struct<(vector<3xf32>, !spirv.array<4xf32>, !spirv.struct<(f32)>)> {
+  // CHECK: spirv.CompositeConstruct
+  %0 = spirv.CompositeConstruct %arg0, %arg1, %arg2 : (vector<3xf32>, !spirv.array<4xf32>, !spirv.struct<(f32)>) -> !spirv.struct<(vector<3xf32>, !spirv.array<4xf32>, !spirv.struct<(f32)>)>
+  return %0: !spirv.struct<(vector<3xf32>, !spirv.array<4xf32>, !spirv.struct<(f32)>)>
 }
 
 // -----
 
 // CHECK-LABEL: func @composite_construct_mixed_scalar_vector
 func.func @composite_construct_mixed_scalar_vector(%arg0: f32, %arg1: f32, %arg2 : vector<2xf32>) -> vector<4xf32> {
-  // CHECK: spv.CompositeConstruct %{{.+}}, %{{.+}}, %{{.+}} : (f32, vector<2xf32>, f32) -> vector<4xf32>
-  %0 = spv.CompositeConstruct %arg0, %arg2, %arg1 : (f32, vector<2xf32>, f32) -> vector<4xf32>
+  // CHECK: spirv.CompositeConstruct %{{.+}}, %{{.+}}, %{{.+}} : (f32, vector<2xf32>, f32) -> vector<4xf32>
+  %0 = spirv.CompositeConstruct %arg0, %arg2, %arg1 : (f32, vector<2xf32>, f32) -> vector<4xf32>
   return %0: vector<4xf32>
 }
 
 // -----
 
-func.func @composite_construct_coopmatrix(%arg0 : f32) -> !spv.coopmatrix<8x16xf32, Subgroup> {
-  // CHECK: spv.CompositeConstruct {{%.*}} : (f32) -> !spv.coopmatrix<8x16xf32, Subgroup>
-  %0 = spv.CompositeConstruct %arg0 : (f32) -> !spv.coopmatrix<8x16xf32, Subgroup>
-  return %0: !spv.coopmatrix<8x16xf32, Subgroup>
+func.func @composite_construct_coopmatrix(%arg0 : f32) -> !spirv.coopmatrix<8x16xf32, Subgroup> {
+  // CHECK: spirv.CompositeConstruct {{%.*}} : (f32) -> !spirv.coopmatrix<8x16xf32, Subgroup>
+  %0 = spirv.CompositeConstruct %arg0 : (f32) -> !spirv.coopmatrix<8x16xf32, Subgroup>
+  return %0: !spirv.coopmatrix<8x16xf32, Subgroup>
 }
 
 // -----
 
 func.func @composite_construct_invalid_result_type(%arg0: f32, %arg1: f32, %arg2 : f32) -> vector<3xf32> {
   // expected-error @+1 {{has incorrect number of operands: expected 3, but provided 2}}
-  %0 = spv.CompositeConstruct %arg0, %arg2 : (f32, f32) -> vector<3xf32>
+  %0 = spirv.CompositeConstruct %arg0, %arg2 : (f32, f32) -> vector<3xf32>
   return %0: vector<3xf32>
 }
 
@@ -47,39 +47,39 @@ func.func @composite_construct_invalid_result_type(%arg0: f32, %arg1: f32, %arg2
 
 func.func @composite_construct_invalid_operand_type(%arg0: f32, %arg1: f32, %arg2 : f32) -> vector<3xi32> {
   // expected-error @+1 {{operand type mismatch: expected operand type 'i32', but provided 'f32'}}
-  %0 = spv.CompositeConstruct %arg0, %arg1, %arg2 : (f32, f32, f32) -> vector<3xi32>
+  %0 = spirv.CompositeConstruct %arg0, %arg1, %arg2 : (f32, f32, f32) -> vector<3xi32>
   return %0: vector<3xi32>
 }
 
 // -----
 
-func.func @composite_construct_coopmatrix_incorrect_operand_count(%arg0 : f32, %arg1 : f32) -> !spv.coopmatrix<8x16xf32, Subgroup> {
+func.func @composite_construct_coopmatrix_incorrect_operand_count(%arg0 : f32, %arg1 : f32) -> !spirv.coopmatrix<8x16xf32, Subgroup> {
   // expected-error @+1 {{has incorrect number of operands: expected 1, but provided 2}}
-  %0 = spv.CompositeConstruct %arg0, %arg1 : (f32, f32) -> !spv.coopmatrix<8x16xf32, Subgroup>
-  return %0: !spv.coopmatrix<8x16xf32, Subgroup>
+  %0 = spirv.CompositeConstruct %arg0, %arg1 : (f32, f32) -> !spirv.coopmatrix<8x16xf32, Subgroup>
+  return %0: !spirv.coopmatrix<8x16xf32, Subgroup>
 }
 
 // -----
 
-func.func @composite_construct_coopmatrix_incorrect_element_type(%arg0 : i32) -> !spv.coopmatrix<8x16xf32, Subgroup> {
+func.func @composite_construct_coopmatrix_incorrect_element_type(%arg0 : i32) -> !spirv.coopmatrix<8x16xf32, Subgroup> {
   // expected-error @+1 {{operand type mismatch: expected operand type 'f32', but provided 'i32'}}
-  %0 = spv.CompositeConstruct %arg0 : (i32) -> !spv.coopmatrix<8x16xf32, Subgroup>
-  return %0: !spv.coopmatrix<8x16xf32, Subgroup>
+  %0 = spirv.CompositeConstruct %arg0 : (i32) -> !spirv.coopmatrix<8x16xf32, Subgroup>
+  return %0: !spirv.coopmatrix<8x16xf32, Subgroup>
 }
 
 // -----
 
-func.func @composite_construct_array(%arg0: f32) -> !spv.array<4xf32> {
+func.func @composite_construct_array(%arg0: f32) -> !spirv.array<4xf32> {
   // expected-error @+1 {{expected to return a vector or cooperative matrix when the number of constituents is less than what the result needs}}
-  %0 = spv.CompositeConstruct %arg0 : (f32) -> !spv.array<4xf32>
-  return %0: !spv.array<4xf32>
+  %0 = spirv.CompositeConstruct %arg0 : (f32) -> !spirv.array<4xf32>
+  return %0: !spirv.array<4xf32>
 }
 
 // -----
 
 func.func @composite_construct_vector_wrong_element_type(%arg0: f32, %arg1: f32, %arg2 : vector<2xi32>) -> vector<4xf32> {
   // expected-error @+1 {{operand element type mismatch: expected to be 'f32', but provided 'i32'}}
-  %0 = spv.CompositeConstruct %arg0, %arg2, %arg1 : (f32, vector<2xi32>, f32) -> vector<4xf32>
+  %0 = spirv.CompositeConstruct %arg0, %arg2, %arg1 : (f32, vector<2xi32>, f32) -> vector<4xf32>
   return %0: vector<4xf32>
 }
 
@@ -87,43 +87,43 @@ func.func @composite_construct_vector_wrong_element_type(%arg0: f32, %arg1: f32,
 
 func.func @composite_construct_vector_wrong_count(%arg0: f32, %arg1: f32, %arg2 : vector<2xf32>) -> vector<4xf32> {
   // expected-error @+1 {{op has incorrect number of operands: expected 4, but provided 3}}
-  %0 = spv.CompositeConstruct %arg0, %arg2 : (f32, vector<2xf32>) -> vector<4xf32>
+  %0 = spirv.CompositeConstruct %arg0, %arg2 : (f32, vector<2xf32>) -> vector<4xf32>
   return %0: vector<4xf32>
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.CompositeExtractOp
+// spirv.CompositeExtractOp
 //===----------------------------------------------------------------------===//
 
-func.func @composite_extract_array(%arg0: !spv.array<4xf32>) -> f32 {
-  // CHECK: {{%.*}} = spv.CompositeExtract {{%.*}}[1 : i32] : !spv.array<4 x f32>
-  %0 = spv.CompositeExtract %arg0[1 : i32] : !spv.array<4xf32>
+func.func @composite_extract_array(%arg0: !spirv.array<4xf32>) -> f32 {
+  // CHECK: {{%.*}} = spirv.CompositeExtract {{%.*}}[1 : i32] : !spirv.array<4 x f32>
+  %0 = spirv.CompositeExtract %arg0[1 : i32] : !spirv.array<4xf32>
   return %0: f32
 }
 
 // -----
 
-func.func @composite_extract_struct(%arg0 : !spv.struct<(f32, !spv.array<4xf32>)>) -> f32 {
-  // CHECK: {{%.*}} = spv.CompositeExtract {{%.*}}[1 : i32, 2 : i32] : !spv.struct<(f32, !spv.array<4 x f32>)>
-  %0 = spv.CompositeExtract %arg0[1 : i32, 2 : i32] : !spv.struct<(f32, !spv.array<4xf32>)>
+func.func @composite_extract_struct(%arg0 : !spirv.struct<(f32, !spirv.array<4xf32>)>) -> f32 {
+  // CHECK: {{%.*}} = spirv.CompositeExtract {{%.*}}[1 : i32, 2 : i32] : !spirv.struct<(f32, !spirv.array<4 x f32>)>
+  %0 = spirv.CompositeExtract %arg0[1 : i32, 2 : i32] : !spirv.struct<(f32, !spirv.array<4xf32>)>
   return %0 : f32
 }
 
 // -----
 
 func.func @composite_extract_vector(%arg0 : vector<4xf32>) -> f32 {
-  // CHECK: {{%.*}} = spv.CompositeExtract {{%.*}}[1 : i32] : vector<4xf32>
-  %0 = spv.CompositeExtract %arg0[1 : i32] : vector<4xf32>
+  // CHECK: {{%.*}} = spirv.CompositeExtract {{%.*}}[1 : i32] : vector<4xf32>
+  %0 = spirv.CompositeExtract %arg0[1 : i32] : vector<4xf32>
   return %0 : f32
 }
 
 // -----
 
-func.func @composite_extract_coopmatrix(%arg0 : !spv.coopmatrix<8x16xf32, Subgroup>) -> f32 {
-  // CHECK: {{%.*}} = spv.CompositeExtract {{%.*}}[2 : i32] : !spv.coopmatrix<8x16xf32, Subgroup>
-  %0 = spv.CompositeExtract %arg0[2 : i32] : !spv.coopmatrix<8x16xf32, Subgroup>
+func.func @composite_extract_coopmatrix(%arg0 : !spirv.coopmatrix<8x16xf32, Subgroup>) -> f32 {
+  // CHECK: {{%.*}} = spirv.CompositeExtract {{%.*}}[2 : i32] : !spirv.coopmatrix<8x16xf32, Subgroup>
+  %0 = spirv.CompositeExtract %arg0[2 : i32] : !spirv.coopmatrix<8x16xf32, Subgroup>
   return %0 : f32
 }
 
@@ -131,59 +131,59 @@ func.func @composite_extract_coopmatrix(%arg0 : !spv.coopmatrix<8x16xf32, Subgro
 
 func.func @composite_extract_no_ssa_operand() -> () {
   // expected-error @+1 {{expected SSA operand}}
-  %0 = spv.CompositeExtract [4 : i32, 1 : i32] : !spv.array<4x!spv.array<4xf32>>
+  %0 = spirv.CompositeExtract [4 : i32, 1 : i32] : !spirv.array<4x!spirv.array<4xf32>>
   return
 }
 
 // -----
 
 func.func @composite_extract_invalid_index_type_1() -> () {
-  %0 = spv.Constant 10 : i32
-  %1 = spv.Variable : !spv.ptr<!spv.array<4x!spv.array<4xf32>>, Function>
-  %2 = spv.Load "Function" %1 ["Volatile"] : !spv.array<4x!spv.array<4xf32>>
+  %0 = spirv.Constant 10 : i32
+  %1 = spirv.Variable : !spirv.ptr<!spirv.array<4x!spirv.array<4xf32>>, Function>
+  %2 = spirv.Load "Function" %1 ["Volatile"] : !spirv.array<4x!spirv.array<4xf32>>
   // expected-error @+1 {{expected attribute value}}
-  %3 = spv.CompositeExtract %2[%0] : !spv.array<4x!spv.array<4xf32>>
+  %3 = spirv.CompositeExtract %2[%0] : !spirv.array<4x!spirv.array<4xf32>>
   return
 }
 
 // -----
 
-func.func @composite_extract_invalid_index_type_2(%arg0 : !spv.array<4x!spv.array<4xf32>>) -> () {
+func.func @composite_extract_invalid_index_type_2(%arg0 : !spirv.array<4x!spirv.array<4xf32>>) -> () {
   // expected-error @+1 {{attribute 'indices' failed to satisfy constraint: 32-bit integer array attribute}}
-  %0 = spv.CompositeExtract %arg0[1] : !spv.array<4x!spv.array<4xf32>>
+  %0 = spirv.CompositeExtract %arg0[1] : !spirv.array<4x!spirv.array<4xf32>>
   return
 }
 
 // -----
 
-func.func @composite_extract_invalid_index_identifier(%arg0 : !spv.array<4x!spv.array<4xf32>>) -> () {
+func.func @composite_extract_invalid_index_identifier(%arg0 : !spirv.array<4x!spirv.array<4xf32>>) -> () {
   // expected-error @+1 {{expected attribute value}}
-  %0 = spv.CompositeExtract %arg0 ]1 : i32) : !spv.array<4x!spv.array<4xf32>>
+  %0 = spirv.CompositeExtract %arg0 ]1 : i32) : !spirv.array<4x!spirv.array<4xf32>>
   return
 }
 
 // -----
 
-func.func @composite_extract_2D_array_out_of_bounds_access_1(%arg0: !spv.array<4x!spv.array<4xf32>>) -> () {
-  // expected-error @+1 {{index 4 out of bounds for '!spv.array<4 x !spv.array<4 x f32>>'}}
-  %0 = spv.CompositeExtract %arg0[4 : i32, 1 : i32] : !spv.array<4x!spv.array<4xf32>>
+func.func @composite_extract_2D_array_out_of_bounds_access_1(%arg0: !spirv.array<4x!spirv.array<4xf32>>) -> () {
+  // expected-error @+1 {{index 4 out of bounds for '!spirv.array<4 x !spirv.array<4 x f32>>'}}
+  %0 = spirv.CompositeExtract %arg0[4 : i32, 1 : i32] : !spirv.array<4x!spirv.array<4xf32>>
   return
 }
 
 // -----
 
-func.func @composite_extract_2D_array_out_of_bounds_access_2(%arg0: !spv.array<4x!spv.array<4xf32>>
+func.func @composite_extract_2D_array_out_of_bounds_access_2(%arg0: !spirv.array<4x!spirv.array<4xf32>>
 ) -> () {
-  // expected-error @+1 {{index 4 out of bounds for '!spv.array<4 x f32>'}}
-  %0 = spv.CompositeExtract %arg0[1 : i32, 4 : i32] : !spv.array<4x!spv.array<4xf32>>
+  // expected-error @+1 {{index 4 out of bounds for '!spirv.array<4 x f32>'}}
+  %0 = spirv.CompositeExtract %arg0[1 : i32, 4 : i32] : !spirv.array<4x!spirv.array<4xf32>>
   return
 }
 
 // -----
 
-func.func @composite_extract_struct_element_out_of_bounds_access(%arg0 : !spv.struct<(f32, !spv.array<4xf32>)>) -> () {
-  // expected-error @+1 {{index 2 out of bounds for '!spv.struct<(f32, !spv.array<4 x f32>)>'}}
-  %0 = spv.CompositeExtract %arg0[2 : i32, 0 : i32] : !spv.struct<(f32, !spv.array<4xf32>)>
+func.func @composite_extract_struct_element_out_of_bounds_access(%arg0 : !spirv.struct<(f32, !spirv.array<4xf32>)>) -> () {
+  // expected-error @+1 {{index 2 out of bounds for '!spirv.struct<(f32, !spirv.array<4 x f32>)>'}}
+  %0 = spirv.CompositeExtract %arg0[2 : i32, 0 : i32] : !spirv.struct<(f32, !spirv.array<4xf32>)>
   return
 }
 
@@ -191,15 +191,15 @@ func.func @composite_extract_struct_element_out_of_bounds_access(%arg0 : !spv.st
 
 func.func @composite_extract_vector_out_of_bounds_access(%arg0: vector<4xf32>) -> () {
   // expected-error @+1 {{index 4 out of bounds for 'vector<4xf32>'}}
-  %0 = spv.CompositeExtract %arg0[4 : i32] : vector<4xf32>
+  %0 = spirv.CompositeExtract %arg0[4 : i32] : vector<4xf32>
   return
 }
 
 // -----
 
-func.func @composite_extract_invalid_types_1(%arg0: !spv.array<4x!spv.array<4xf32>>) -> () {
+func.func @composite_extract_invalid_types_1(%arg0: !spirv.array<4x!spirv.array<4xf32>>) -> () {
   // expected-error @+1 {{cannot extract from non-composite type 'f32' with index 3}}
-  %0 = spv.CompositeExtract %arg0[1 : i32, 2 : i32, 3 : i32] : !spv.array<4x!spv.array<4xf32>>
+  %0 = spirv.CompositeExtract %arg0[1 : i32, 2 : i32, 3 : i32] : !spirv.array<4x!spirv.array<4xf32>>
   return
 }
 
@@ -207,117 +207,117 @@ func.func @composite_extract_invalid_types_1(%arg0: !spv.array<4x!spv.array<4xf3
 
 func.func @composite_extract_invalid_types_2(%arg0: f32) -> () {
   // expected-error @+1 {{cannot extract from non-composite type 'f32' with index 1}}
-  %0 = spv.CompositeExtract %arg0[1 : i32] : f32
+  %0 = spirv.CompositeExtract %arg0[1 : i32] : f32
   return
 }
 
 // -----
 
-func.func @composite_extract_invalid_extracted_type(%arg0: !spv.array<4x!spv.array<4xf32>>) -> () {
-  // expected-error @+1 {{expected at least one index for spv.CompositeExtract}}
-  %0 = spv.CompositeExtract %arg0[] : !spv.array<4x!spv.array<4xf32>>
+func.func @composite_extract_invalid_extracted_type(%arg0: !spirv.array<4x!spirv.array<4xf32>>) -> () {
+  // expected-error @+1 {{expected at least one index for spirv.CompositeExtract}}
+  %0 = spirv.CompositeExtract %arg0[] : !spirv.array<4x!spirv.array<4xf32>>
   return
 }
 
 // -----
 
-func.func @composite_extract_result_type_mismatch(%arg0: !spv.array<4xf32>) -> i32 {
+func.func @composite_extract_result_type_mismatch(%arg0: !spirv.array<4xf32>) -> i32 {
   // expected-error @+1 {{invalid result type: expected 'f32' but provided 'i32'}}
-  %0 = "spv.CompositeExtract"(%arg0) {indices = [2: i32]} : (!spv.array<4xf32>) -> (i32)
+  %0 = "spirv.CompositeExtract"(%arg0) {indices = [2: i32]} : (!spirv.array<4xf32>) -> (i32)
   return %0: i32
 }
 
 // -----
 
 //===----------------------------------------------------------------------===//
-// spv.CompositeInsert
+// spirv.CompositeInsert
 //===----------------------------------------------------------------------===//
 
-func.func @composite_insert_array(%arg0: !spv.array<4xf32>, %arg1: f32) -> !spv.array<4xf32> {
-  // CHECK: {{%.*}} = spv.CompositeInsert {{%.*}}, {{%.*}}[1 : i32] : f32 into !spv.array<4 x f32>
-  %0 = spv.CompositeInsert %arg1, %arg0[1 : i32] : f32 into !spv.array<4xf32>
-  return %0: !spv.array<4xf32>
+func.func @composite_insert_array(%arg0: !spirv.array<4xf32>, %arg1: f32) -> !spirv.array<4xf32> {
+  // CHECK: {{%.*}} = spirv.CompositeInsert {{%.*}}, {{%.*}}[1 : i32] : f32 into !spirv.array<4 x f32>
+  %0 = spirv.CompositeInsert %arg1, %arg0[1 : i32] : f32 into !spirv.array<4xf32>
+  return %0: !spirv.array<4xf32>
 }
 
 // -----
 
-func.func @composite_insert_struct(%arg0: !spv.struct<(!spv.array<4xf32>, f32)>, %arg1: !spv.array<4xf32>) -> !spv.struct<(!spv.array<4xf32>, f32)> {
-  // CHECK: {{%.*}} = spv.CompositeInsert {{%.*}}, {{%.*}}[0 : i32] : !spv.array<4 x f32> into !spv.struct<(!spv.array<4 x f32>, f32)>
-  %0 = spv.CompositeInsert %arg1, %arg0[0 : i32] : !spv.array<4xf32> into !spv.struct<(!spv.array<4xf32>, f32)>
-  return %0: !spv.struct<(!spv.array<4xf32>, f32)>
+func.func @composite_insert_struct(%arg0: !spirv.struct<(!spirv.array<4xf32>, f32)>, %arg1: !spirv.array<4xf32>) -> !spirv.struct<(!spirv.array<4xf32>, f32)> {
+  // CHECK: {{%.*}} = spirv.CompositeInsert {{%.*}}, {{%.*}}[0 : i32] : !spirv.array<4 x f32> into !spirv.struct<(!spirv.array<4 x f32>, f32)>
+  %0 = spirv.CompositeInsert %arg1, %arg0[0 : i32] : !spirv.array<4xf32> into !spirv.struct<(!spirv.array<4xf32>, f32)>
+  return %0: !spirv.struct<(!spirv.array<4xf32>, f32)>
 }
 
 // -----
 
-func.func @composite_insert_coopmatrix(%arg0: !spv.coopmatrix<8x16xi32, Subgroup>, %arg1: i32) -> !spv.coopmatrix<8x16xi32, Subgroup> {
-  // CHECK: {{%.*}} = spv.CompositeInsert {{%.*}}, {{%.*}}[5 : i32] : i32 into !spv.coopmatrix<8x16xi32, Subgroup>
-  %0 = spv.CompositeInsert %arg1, %arg0[5 : i32] : i32 into !spv.coopmatrix<8x16xi32, Subgroup>
-  return %0: !spv.coopmatrix<8x16xi32, Subgroup>
+func.func @composite_insert_coopmatrix(%arg0: !spirv.coopmatrix<8x16xi32, Subgroup>, %arg1: i32) -> !spirv.coopmatrix<8x16xi32, Subgroup> {
+  // CHECK: {{%.*}} = spirv.CompositeInsert {{%.*}}, {{%.*}}[5 : i32] : i32 into !spirv.coopmatrix<8x16xi32, Subgroup>
+  %0 = spirv.CompositeInsert %arg1, %arg0[5 : i32] : i32 into !spirv.coopmatrix<8x16xi32, Subgroup>
+  return %0: !spirv.coopmatrix<8x16xi32, Subgroup>
 }
 
 // -----
 
-func.func @composite_insert_no_indices(%arg0: !spv.array<4xf32>, %arg1: f32) -> !spv.array<4xf32> {
+func.func