[flang-commits] [flang] [mlir][llvm] Replace NullOp by ZeroOp (PR #67183)

Tobias Gysi via flang-commits flang-commits at lists.llvm.org
Fri Sep 22 12:24:51 PDT 2023


https://github.com/gysit created https://github.com/llvm/llvm-project/pull/67183

This revision replaces the LLVM dialect NullOp with the recently introduced ZeroOp. ZeroOp is more general: it represents a zero value of any LLVM type, not just null pointers.

This is a follow-up to https://github.com/llvm/llvm-project/pull/65508
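
For illustration, a minimal sketch of the migration (the types below are only examples; both uses of llvm.mlir.zero mirror the updated dialect documentation in this patch):

    // Before: a dedicated op that could only produce null pointers.
    %p0 = llvm.mlir.null : !llvm.ptr

    // After: llvm.mlir.zero yields the null pointer for pointer types ...
    %p1 = llvm.mlir.zero : !llvm.ptr

    // ... and a zero-initialized value for any other LLVM-compatible type.
    %s = llvm.mlir.zero : !llvm.struct<(i32, f32)>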

From 48d6a5a051f4f2598c1e4452923d6db9c4f6ea9d Mon Sep 17 00:00:00 2001
From: Tobias Gysi <tobias.gysi at nextsilicon.com>
Date: Fri, 22 Sep 2023 18:55:49 +0000
Subject: [PATCH] [mlir][llvm] Replace NullOp by ZeroOp

This revision replaces the LLVM dialect NullOp with the recently
introduced ZeroOp. ZeroOp is more general: it represents a zero value
of any LLVM type, not just null pointers.

This is a follow-up to https://github.com/llvm/llvm-project/pull/65508
---
 flang/lib/Optimizer/CodeGen/CodeGen.cpp       | 14 ++++----
 flang/test/Fir/convert-to-llvm.fir            | 36 +++++++++----------
 flang/test/Fir/embox-char.fir                 |  4 +--
 flang/test/Fir/embox-substring.fir            |  2 +-
 flang/test/Fir/tbaa.fir                       |  4 +--
 mlir/docs/Dialects/LLVM.md                    | 11 +++---
 .../mlir/Conversion/LLVMCommon/Pattern.h      |  2 +-
 mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td   | 27 --------------
 .../Conversion/AsyncToLLVM/AsyncToLLVM.cpp    |  4 +--
 .../GPUCommon/GPUToLLVMConversion.cpp         |  4 +--
 mlir/lib/Conversion/LLVMCommon/Pattern.cpp    |  4 +--
 mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp    |  8 ++---
 .../Transforms/SparseTensorConversion.cpp     |  2 +-
 mlir/lib/Target/LLVMIR/ModuleImport.cpp       |  2 +-
 .../AsyncToLLVM/convert-coro-to-llvm.mlir     |  2 +-
 .../AsyncToLLVM/convert-runtime-to-llvm.mlir  |  2 +-
 ...ower-launch-func-to-gpu-runtime-calls.mlir |  2 +-
 .../Conversion/GPUToVulkan/invoke-vulkan.mlir |  2 +-
 .../GPUToVulkan/typed-pointers.mlir           |  2 +-
 .../convert-dynamic-memref-ops.mlir           | 14 ++++----
 .../convert-static-memref-ops.mlir            |  8 ++---
 .../MemRefToLLVM/memref-to-llvm.mlir          |  6 ++--
 .../MemRefToLLVM/typed-pointers.mlir          |  4 +--
 mlir/test/Dialect/GPU/ops.mlir                |  2 +-
 mlir/test/Dialect/LLVMIR/callgraph.mlir       |  4 +--
 mlir/test/Dialect/LLVMIR/canonicalize.mlir    |  2 +-
 mlir/test/Dialect/LLVMIR/invalid.mlir         |  6 ++--
 mlir/test/Dialect/LLVMIR/mem2reg.mlir         | 10 +++---
 .../LLVMIR/roundtrip-typed-pointers.mlir      |  8 ++---
 mlir/test/Dialect/LLVMIR/roundtrip.mlir       |  8 ++---
 .../test/Dialect/SparseTensor/conversion.mlir |  2 +-
 .../SparseTensor/convert_dense2sparse.mlir    |  8 ++---
 .../Dialect/SparseTensor/sparse_concat.mlir   |  6 ++--
 .../SparseTensor/sparse_fill_zero.mlir        |  2 +-
 mlir/test/Target/LLVMIR/Import/constant.ll    |  6 ++--
 mlir/test/Target/LLVMIR/Import/exception.ll   |  4 +--
 .../test/Target/LLVMIR/Import/instructions.ll |  2 +-
 .../Target/LLVMIR/Import/zeroinitializer.ll   |  2 +-
 .../test/Target/LLVMIR/llvmir-intrinsics.mlir |  8 ++---
 mlir/test/Target/LLVMIR/llvmir.mlir           |  4 +--
 .../omptarget-declare-target-llvm-host.mlir   | 34 +++++++++---------
 .../test/Transforms/test-convert-call-op.mlir |  2 +-
 .../FuncToLLVM/TestConvertCallOp.cpp          |  2 +-
 43 files changed, 131 insertions(+), 157 deletions(-)

diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
index 6a6a10f632648cd..9d92086f8d70908 100644
--- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -1150,7 +1150,7 @@ computeElementDistance(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy,
   // *)0 + 1)' trick for all types. The generated instructions are optimized
   // into constant by the first pass of InstCombine, so it should not be a
   // performance issue.
-  auto nullPtr = rewriter.create<mlir::LLVM::NullOp>(loc, ptrTy);
+  auto nullPtr = rewriter.create<mlir::LLVM::ZeroOp>(loc, ptrTy);
   auto gep = rewriter.create<mlir::LLVM::GEPOp>(
       loc, ptrTy, nullPtr, llvm::ArrayRef<mlir::LLVM::GEPArg>{1});
   return rewriter.create<mlir::LLVM::PtrToIntOp>(loc, idxTy, gep);
@@ -1431,7 +1431,7 @@ struct EmboxCommonConversion : public FIROpConversion<OP> {
             name, Fortran::semantics::typeInfoBuiltinModule))
       fir::emitFatalError(
           loc, "runtime derived type info descriptor was not generated");
-    return rewriter.create<mlir::LLVM::NullOp>(
+    return rewriter.create<mlir::LLVM::ZeroOp>(
         loc, ::getVoidPtrType(mod.getContext()));
   }
 
@@ -1474,7 +1474,7 @@ struct EmboxCommonConversion : public FIROpConversion<OP> {
           } else {
             // Unlimited polymorphic type descriptor with no record type. Set
             // type descriptor address to a clean state.
-            typeDesc = rewriter.create<mlir::LLVM::NullOp>(
+            typeDesc = rewriter.create<mlir::LLVM::ZeroOp>(
                 loc, ::getVoidPtrType(mod.getContext()));
           }
         } else {
@@ -3407,7 +3407,7 @@ struct ZeroOpConversion : public FIROpConversion<fir::ZeroOp> {
                   mlir::ConversionPatternRewriter &rewriter) const override {
     mlir::Type ty = convertType(zero.getType());
     if (ty.isa<mlir::LLVM::LLVMPointerType>()) {
-      rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(zero, ty);
+      rewriter.replaceOpWithNewOp<mlir::LLVM::ZeroOp>(zero, ty);
     } else if (ty.isa<mlir::IntegerType>()) {
       rewriter.replaceOpWithNewOp<mlir::LLVM::ConstantOp>(
           zero, ty, mlir::IntegerAttr::get(ty, 0));
@@ -3470,7 +3470,7 @@ struct IsPresentOpConversion : public FIROpConversion<fir::IsPresentOp> {
 };
 
 /// Create value signaling an absent optional argument in a call, e.g.
-/// `fir.absent !fir.ref<i64>` -->  `llvm.mlir.null : !llvm.ptr<i64>`
+/// `fir.absent !fir.ref<i64>` -->  `llvm.mlir.zero : !llvm.ptr<i64>`
 struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> {
   using FIROpConversion::FIROpConversion;
 
@@ -3485,11 +3485,11 @@ struct AbsentOpConversion : public FIROpConversion<fir::AbsentOp> {
       assert(!structTy.isOpaque() && !structTy.getBody().empty());
       auto undefStruct = rewriter.create<mlir::LLVM::UndefOp>(loc, ty);
       auto nullField =
-          rewriter.create<mlir::LLVM::NullOp>(loc, structTy.getBody()[0]);
+          rewriter.create<mlir::LLVM::ZeroOp>(loc, structTy.getBody()[0]);
       rewriter.replaceOpWithNewOp<mlir::LLVM::InsertValueOp>(
           absent, undefStruct, nullField, 0);
     } else {
-      rewriter.replaceOpWithNewOp<mlir::LLVM::NullOp>(absent, ty);
+      rewriter.replaceOpWithNewOp<mlir::LLVM::ZeroOp>(absent, ty);
     }
     return mlir::success();
   }
diff --git a/flang/test/Fir/convert-to-llvm.fir b/flang/test/Fir/convert-to-llvm.fir
index 52716afe3198d91..e39f13ac98268e8 100644
--- a/flang/test/Fir/convert-to-llvm.fir
+++ b/flang/test/Fir/convert-to-llvm.fir
@@ -145,7 +145,7 @@ func.func @zero_test_ptr() {
   return
 }
 
-// CHECK: %{{.*}} = llvm.mlir.null : !llvm.ptr<f32>
+// CHECK: %{{.*}} = llvm.mlir.zero : !llvm.ptr<f32>
 // CHECK-NOT: fir.zero_bits
 
 // -----
@@ -201,7 +201,7 @@ func.func @test_alloc_and_freemem_one() {
 }
 
 // CHECK-LABEL:  llvm.func @test_alloc_and_freemem_one() {
-// CHECK-NEXT:    %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr<i32>
+// CHECK-NEXT:    %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr<i32>
 // CHECK-NEXT:    %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
 // CHECK-NEXT:    %[[N:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr<i32> to i64
 // CHECK-NEXT:    llvm.call @malloc(%[[N]])
@@ -220,7 +220,7 @@ func.func @test_alloc_and_freemem_several() {
 }
 
 // CHECK-LABEL:  llvm.func @test_alloc_and_freemem_several() {
-// CHECK: [[NULL:%.*]]  = llvm.mlir.null : !llvm.ptr<array<100 x f32>>
+// CHECK: [[NULL:%.*]]  = llvm.mlir.zero : !llvm.ptr<array<100 x f32>>
 // CHECK: [[PTR:%.*]]  = llvm.getelementptr [[NULL]][{{.*}}] : (!llvm.ptr<array<100 x f32>>) -> !llvm.ptr<array<100 x f32>>
 // CHECK: [[N:%.*]]  = llvm.ptrtoint [[PTR]] : !llvm.ptr<array<100 x f32>> to i64
 // CHECK: [[MALLOC:%.*]] = llvm.call @malloc([[N]])
@@ -238,7 +238,7 @@ func.func @test_with_shape(%ncols: index, %nrows: index) {
 
 // CHECK-LABEL: llvm.func @test_with_shape
 // CHECK-SAME: %[[NCOLS:.*]]: i64, %[[NROWS:.*]]: i64
-// CHECK:   %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr<f32>
+// CHECK:   %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr<f32>
 // CHECK:   %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
 // CHECK:   %[[FOUR:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr<f32> to i64
 // CHECK:   %[[DIM1_SIZE:.*]] = llvm.mul %[[FOUR]], %[[NCOLS]]  : i64
@@ -258,7 +258,7 @@ func.func @test_string_with_shape(%len: index, %nelems: index) {
 
 // CHECK-LABEL: llvm.func @test_string_with_shape
 // CHECK-SAME: %[[LEN:.*]]: i64, %[[NELEMS:.*]]: i64)
-// CHECK:   %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr<i8>
+// CHECK:   %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr<i8>
 // CHECK:   %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
 // CHECK:   %[[ONE:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr<i8> to i64
 // CHECK:   %[[LEN_SIZE:.*]] = llvm.mul %[[ONE]], %[[LEN]]  : i64
@@ -750,7 +750,7 @@ func.func @convert_from_i1(%arg0 : i1) {
 
 // CHECK-LABEL: convert_from_i1(
 // CHECK-SAME: %[[ARG0:.*]]: i1
-// CHECK:        %{{.*}} = llvm.zext %[[ARG0]] : i1 to i32 
+// CHECK:        %{{.*}} = llvm.zext %[[ARG0]] : i1 to i32
 
 // -----
 
@@ -1403,7 +1403,7 @@ func.func @test_absent_i64() -> () {
 }
 
 // CHECK-LABEL: @test_absent_i64
-// CHECK-NEXT:  %{{.*}} = llvm.mlir.null : !llvm.ptr<i64>
+// CHECK-NEXT:  %{{.*}} = llvm.mlir.zero : !llvm.ptr<i64>
 // CHECK-NEXT:  llvm.return
 // CHECK-NEXT:  }
 
@@ -1412,7 +1412,7 @@ func.func @test_absent_box() -> () {
   return
 }
 // CHECK-LABEL: @test_absent_box
-// CHECK-NEXT:  %{{.*}} = llvm.mlir.null : !llvm.ptr<struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>
+// CHECK-NEXT:  %{{.*}} = llvm.mlir.zero : !llvm.ptr<struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>
 // CHECK-NEXT:  llvm.return
 // CHECK-NEXT:  }
 
@@ -1442,7 +1442,7 @@ func.func @absent() -> i1 {
 
 // CHECK-LABEL: @absent
 // CHECK-SAME:  () -> i1
-// CHECK-NEXT:  %[[ptr:.*]] = llvm.mlir.null : !llvm.ptr<i64>
+// CHECK-NEXT:  %[[ptr:.*]] = llvm.mlir.zero : !llvm.ptr<i64>
 // CHECK-NEXT:  %[[ret_val:.*]] = llvm.call @is_present(%[[ptr]]) : (!llvm.ptr<i64>) -> i1
 // CHECK-NEXT:  llvm.return %[[ret_val]] : i1
 
@@ -1525,7 +1525,7 @@ func.func @box_tdesc(%arg0: !fir.box<!fir.type<dtdesc{a:i32}>>) {
 
 // CHECK-LABEL: llvm.func @box_tdesc(
 // CHECK-SAME:                       %[[ARG0:.*]]: !llvm.ptr<struct<(ptr<struct<"dtdesc", (i{{.*}})>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr<i{{.*}}>, array<1 x i{{.*}}>)>>) {
-// CHECK:         %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 7] : (!llvm.ptr<struct<(ptr<struct<"dtdesc", (i{{.*}})>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr<i{{.*}}>, array<1 x i{{.*}}>)>>) -> !llvm.ptr<ptr<i8>> 
+// CHECK:         %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 7] : (!llvm.ptr<struct<(ptr<struct<"dtdesc", (i{{.*}})>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr<i{{.*}}>, array<1 x i{{.*}}>)>>) -> !llvm.ptr<ptr<i8>>
 // CHECK:         %[[LOAD:.*]] = llvm.load %[[GEP]] : !llvm.ptr<ptr<i{{.*}}>>
 
 // -----
@@ -1547,7 +1547,7 @@ func.func @embox0(%arg0: !fir.ref<!fir.array<100xi32>>) {
 // CHECK:         %[[C1:.*]] = llvm.mlir.constant(1 : i32) : i32
 // CHECK:         %[[ALLOCA:.*]] = llvm.alloca %[[C1]] x !llvm.struct<(ptr<array<100 x i32>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})> {alignment = 8 : i64} : (i32) -> !llvm.ptr<struct<(ptr<array<100 x i32>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>
 // CHECK:         %[[TYPE_CODE:.*]] = llvm.mlir.constant(9 : i32) : i32
-// CHECK:         %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr<i32>
+// CHECK:         %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr<i32>
 // CHECK:         %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
 // CHECK:         %[[I64_ELEM_SIZE:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr<i32> to i64
 // CHECK:         %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<array<100 x i32>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>
@@ -1771,7 +1771,7 @@ func.func @xembox0(%arg0: !fir.ref<!fir.array<?xi32>>) {
 // CHECK:         %[[ALLOCA:.*]] = llvm.alloca %[[ALLOCA_SIZE]] x !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr<struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>
 // CHECK:         %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
 // CHECK:         %[[TYPE:.*]] = llvm.mlir.constant(9 : i32) : i32
-// CHECK:         %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr<i32>
+// CHECK:         %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr<i32>
 // CHECK:         %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
 // CHECK:         %[[ELEM_LEN_I64:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr<i32> to i64
 // CHECK:         %[[BOX0:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
@@ -1820,7 +1820,7 @@ func.func @xembox1(%arg0: !fir.ref<!fir.array<?x!fir.char<1, 10>>>) {
 
 // CHECK-LABEL: llvm.func @xembox1(%{{.*}}: !llvm.ptr<array<10 x i8>>) {
 // CHECK:         %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK:         %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr<array<10 x i8>>
+// CHECK:         %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr<array<10 x i8>>
 // CHECK:         %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
 // CHECK:         %[[ELEM_LEN_I64:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr<array<10 x i8>> to i64
 // CHECK:         %{{.*}} = llvm.insertvalue %[[ELEM_LEN_I64]], %{{.*}}[1] : !llvm.struct<(ptr<array<10 x i8>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
@@ -1870,7 +1870,7 @@ func.func private @_QPxb(!fir.box<!fir.array<?x?xf64>>)
 // CHECK:         %[[ARR_SIZE:.*]] = llvm.mul %[[ARR_SIZE_TMP1]], %[[N2]]  : i64
 // CHECK:         %[[ARR:.*]] = llvm.alloca %[[ARR_SIZE]] x f64 {bindc_name = "arr", in_type = !fir.array<?x?xf64>, operandSegmentSizes = array<i32: 0, 2>, uniq_name = "_QFsbEarr"} : (i64) -> !llvm.ptr<f64>
 // CHECK:         %[[TYPE_CODE:.*]] = llvm.mlir.constant(28 : i32) : i32
-// CHECK:         %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr<f64>
+// CHECK:         %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr<f64>
 // CHECK:         %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
 // CHECK:         %[[ELEM_LEN_I64:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr<f64> to i64
 // CHECK:         %[[BOX0:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f64>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
@@ -1949,7 +1949,7 @@ func.func private @_QPtest_dt_callee(%arg0: !fir.box<!fir.array<?xi32>>)
 // CHECK:         %[[ALLOCA_SIZE_X:.*]] = llvm.mlir.constant(1 : i64) : i64
 // CHECK:         %[[X:.*]] = llvm.alloca %[[ALLOCA_SIZE_X]] x !llvm.array<20 x struct<"_QFtest_dt_sliceTt", (i32, i32)>> {bindc_name = "x", in_type = !fir.array<20x!fir.type<_QFtest_dt_sliceTt{i:i32,j:i32}>>, operandSegmentSizes = array<i32: 0, 0>, uniq_name = "_QFtest_dt_sliceEx"} : (i64) -> !llvm.ptr<array<20 x struct<"_QFtest_dt_sliceTt", (i32, i32)>>>
 // CHECK:         %[[TYPE_CODE:.*]] = llvm.mlir.constant(9 : i32) : i32
-// CHECK:         %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr<i32>
+// CHECK:         %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr<i32>
 // CHECK:         %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
 // CHECK:         %[[ELEM_LEN_I64:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr<i32> to i64
 // CHECK:         %[[BOX0:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
@@ -1969,7 +1969,7 @@ func.func private @_QPtest_dt_callee(%arg0: !fir.box<!fir.array<?xi32>>)
 // CHECK:         %[[BOX6:.*]] = llvm.insertvalue %[[F18ADDENDUM_I8]], %[[BOX5]][6] : !llvm.struct<(ptr<i32>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
 // CHECK:         %[[ZERO:.*]] = llvm.mlir.constant(0 : i64) : i64
 // CHECK:         %[[ONE:.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK:         %[[ELE_TYPE:.*]] = llvm.mlir.null : !llvm.ptr<struct<"_QFtest_dt_sliceTt", (i32, i32)>>
+// CHECK:         %[[ELE_TYPE:.*]] = llvm.mlir.zero : !llvm.ptr<struct<"_QFtest_dt_sliceTt", (i32, i32)>>
 // CHECK:         %[[GEP_DTYPE_SIZE:.*]] = llvm.getelementptr %[[ELE_TYPE]][1] : (!llvm.ptr<struct<"_QFtest_dt_sliceTt", (i32, i32)>>) -> !llvm.ptr<struct<"_QFtest_dt_sliceTt", (i32, i32)>>
 // CHECK:         %[[PTRTOINT_DTYPE_SIZE:.*]] = llvm.ptrtoint %[[GEP_DTYPE_SIZE]] : !llvm.ptr<struct<"_QFtest_dt_sliceTt", (i32, i32)>> to i64
 // CHECK:         %[[ADJUSTED_OFFSET:.*]] = llvm.sub %[[C1]], %[[ONE]]  : i64
@@ -2261,7 +2261,7 @@ func.func @test_rebox_1(%arg0: !fir.box<!fir.array<?x?xf32>>) {
 //CHECK:    %[[SIX:.*]] = llvm.mlir.constant(6 : index) : i64
 //CHECK:    %[[EIGHTY:.*]] = llvm.mlir.constant(80 : index) : i64
 //CHECK:    %[[FLOAT_TYPE:.*]] = llvm.mlir.constant(27 : i32) : i32
-//CHECK:    %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr<f32>
+//CHECK:    %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr<f32>
 //CHECK:    %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
 //CHECK:    %[[ELEM_SIZE_I64:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr<f32> to i64
 //CHECK:    %[[RBOX:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<f32>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
@@ -2334,7 +2334,7 @@ func.func @foo(%arg0: !fir.box<!fir.array<?x!fir.type<t{i:i32,c:!fir.char<1,10>}
 //CHECK:   %[[COMPONENT_OFFSET_1:.*]] = llvm.mlir.constant(1 : i64) : i64
 //CHECK:   %[[ELEM_COUNT:.*]] = llvm.mlir.constant(7 : i64) : i64
 //CHECK:   %[[TYPE_CHAR:.*]] = llvm.mlir.constant(40 : i32) : i32
-//CHECK:   %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr<i8>
+//CHECK:   %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr<i8>
 //CHECK:   %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
 //CHECK:   %[[CHAR_SIZE:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr<i8> to i64
 //CHECK:   %[[ELEM_SIZE:.*]] = llvm.mul %[[CHAR_SIZE]], %[[ELEM_COUNT]]
diff --git a/flang/test/Fir/embox-char.fir b/flang/test/Fir/embox-char.fir
index 8a7ef396abab0e4..2e8a826e0b8d501 100644
--- a/flang/test/Fir/embox-char.fir
+++ b/flang/test/Fir/embox-char.fir
@@ -40,7 +40,7 @@
 // CHECK:           %[[VAL_30_ST0:.*]] = llvm.load %[[VAL_29]] : !llvm.ptr<i64>
 // CHECK:           %[[VAL_31_LEN:.*]] = llvm.sdiv %[[VAL_16_BYTESIZE]], %[[VAL_13_WIDTH]]  : i64
 // CHECK:           %[[VAL_32:.*]] = llvm.mlir.constant(44 : i32) : i32
-// CHECK:           %[[VAL_33:.*]] = llvm.mlir.null : !llvm.ptr<i32>
+// CHECK:           %[[VAL_33:.*]] = llvm.mlir.zero : !llvm.ptr<i32>
 // CHECK:           %[[VAL_34:.*]] = llvm.getelementptr %[[VAL_33]][1] : (!llvm.ptr<i32>) -> !llvm.ptr<i32>
 // CHECK:           %[[VAL_35:.*]] = llvm.ptrtoint %[[VAL_34]] : !llvm.ptr<i32> to i64
 // CHECK:           %[[VAL_36_BYTESIZE:.*]] = llvm.mul %[[VAL_35]], %[[VAL_31_LEN]]  : i64
@@ -137,7 +137,7 @@ func.func @test_char4(%arg0: !fir.ref<!fir.box<!fir.heap<!fir.array<?x?x!fir.cha
 // CHECK:           %[[VAL_29:.*]] = llvm.getelementptr %[[VAL_10]][0, 7, %[[VAL_11]], 2] : (!llvm.ptr<struct<(ptr<i8>, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>>, i64) -> !llvm.ptr<i64>
 // CHECK:           %[[VAL_30_ST0:.*]] = llvm.load %[[VAL_29]] : !llvm.ptr<i64>
 // CHECK:           %[[VAL_32:.*]] = llvm.mlir.constant(40 : i32) : i32
-// CHECK:           %[[VAL_33:.*]] = llvm.mlir.null : !llvm.ptr<i8>
+// CHECK:           %[[VAL_33:.*]] = llvm.mlir.zero : !llvm.ptr<i8>
 // CHECK:           %[[VAL_34:.*]] = llvm.getelementptr %[[VAL_33]][1] : (!llvm.ptr<i8>) -> !llvm.ptr<i8>
 // CHECK:           %[[VAL_35:.*]] = llvm.ptrtoint %[[VAL_34]] : !llvm.ptr<i8> to i64
 // CHECK:           %[[VAL_36_BYTESIZE:.*]] = llvm.mul %[[VAL_35]], %[[VAL_16_BYTESIZE]]  : i64
diff --git a/flang/test/Fir/embox-substring.fir b/flang/test/Fir/embox-substring.fir
index fd0b5923df0ed65..4e2395bc7f34571 100644
--- a/flang/test/Fir/embox-substring.fir
+++ b/flang/test/Fir/embox-substring.fir
@@ -30,7 +30,7 @@ func.func private @dump(!fir.box<!fir.array<2x!fir.char<1>>>)
 // CHECK-SAME:                                  %[[VAL_1:.*]]: i64) {
 // CHECK:           %[[VAL_5:.*]] = llvm.mlir.constant(1 : index) : i64
 // CHECK:           llvm.getelementptr
-// CHECK:           %[[VAL_28:.*]] = llvm.mlir.null : !llvm.ptr<i8>
+// CHECK:           %[[VAL_28:.*]] = llvm.mlir.zero : !llvm.ptr<i8>
 // CHECK:           %[[VAL_29:.*]] = llvm.getelementptr %[[VAL_28]][1] : (!llvm.ptr<i8>) -> !llvm.ptr<i8>
 // CHECK:           %[[VAL_30:.*]] = llvm.ptrtoint %[[VAL_29]] : !llvm.ptr<i8> to i64
 // CHECK:           %[[VAL_31:.*]] = llvm.mul %[[VAL_30]], %[[VAL_1]]  : i64
diff --git a/flang/test/Fir/tbaa.fir b/flang/test/Fir/tbaa.fir
index 66bd41bad18e77b..f8a52b2e98db0cc 100644
--- a/flang/test/Fir/tbaa.fir
+++ b/flang/test/Fir/tbaa.fir
@@ -205,7 +205,7 @@ module {
 
 // CHECK-LABEL:   llvm.mlir.global internal @_QFEx() {addr_space = 0 : i32} : !llvm.struct<(ptr<struct<()>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr<i8>, array<1 x i64>)> {
 // CHECK:           %[[VAL_0:.*]] = llvm.mlir.constant(0 : index) : i64
-// CHECK:           %[[VAL_1:.*]] = llvm.mlir.null : !llvm.ptr<struct<()>>
+// CHECK:           %[[VAL_1:.*]] = llvm.mlir.zero : !llvm.ptr<struct<()>>
 // CHECK:           %[[VAL_2:.*]] = llvm.mlir.constant(0 : i64) : i64
 // CHECK:           %[[VAL_3:.*]] = llvm.mlir.constant(-1 : i32) : i32
 // CHECK:           %[[VAL_4:.*]] = llvm.mlir.undef : !llvm.struct<(ptr<struct<()>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr<i8>, array<1 x i64>)>
@@ -223,7 +223,7 @@ module {
 // CHECK:           %[[VAL_16:.*]] = llvm.mlir.constant(1 : i32) : i32
 // CHECK:           %[[VAL_17:.*]] = llvm.trunc %[[VAL_16]] : i32 to i8
 // CHECK:           %[[VAL_18:.*]] = llvm.insertvalue %[[VAL_17]], %[[VAL_15]][6] : !llvm.struct<(ptr<struct<()>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr<i8>, array<1 x i64>)>
-// CHECK:           %[[VAL_19:.*]] = llvm.mlir.null : !llvm.ptr<i8>
+// CHECK:           %[[VAL_19:.*]] = llvm.mlir.zero : !llvm.ptr<i8>
 // CHECK:           %[[VAL_20:.*]] = llvm.bitcast %[[VAL_19]] : !llvm.ptr<i8> to !llvm.ptr<i8>
 // CHECK:           %[[VAL_21:.*]] = llvm.insertvalue %[[VAL_20]], %[[VAL_18]][8] : !llvm.struct<(ptr<struct<()>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr<i8>, array<1 x i64>)>
 // CHECK:           %[[VAL_22:.*]] = llvm.mlir.constant(0 : i64) : i64
diff --git a/mlir/docs/Dialects/LLVM.md b/mlir/docs/Dialects/LLVM.md
index fa5ce630ad43b01..dae2b03b5d5ceee 100644
--- a/mlir/docs/Dialects/LLVM.md
+++ b/mlir/docs/Dialects/LLVM.md
@@ -106,7 +106,7 @@ are produced by dedicated operations that have the corresponding semantics:
 [`llvm.mlir.constant`](#llvmmlirconstant-mlirllvmconstantop),
 [`llvm.mlir.undef`](#llvmmlirundef-mlirllvmundefop),
 [`llvm.mlir.poison`](#llvmmlirpoison-mlirllvmpoisonop),
-[`llvm.mlir.null`](#llvmmlirnull-mlirllvmnullop). Note how these operations are
+[`llvm.mlir.zero`](#llvmmlirzero-mlirllvmzeroop). Note how these operations are
 prefixed with `mlir.` to indicate that they don't belong to LLVM IR but are only
 necessary to model it in MLIR. The values produced by these operations are
 usable just like any other value.
@@ -118,11 +118,12 @@ Examples:
 // by a float.
 %0 = llvm.mlir.undef : !llvm.struct<(i32, f32)>
 
-// Null pointer to i8.
-%1 = llvm.mlir.null : !llvm.ptr<i8>
+// Null pointer.
+%1 = llvm.mlir.zero : !llvm.ptr
 
-// Null pointer to a function with signature void().
-%2 = llvm.mlir.null : !llvm.ptr<func<void ()>>
+// Create a zero-initialized value of a structure type with a 32-bit integer
+// followed by a float.
+%2 = llvm.mlir.zero :  !llvm.struct<(i32, f32)>
 
 // Constant 42 as i32.
 %3 = llvm.mlir.constant(42 : i32) : i32
diff --git a/mlir/include/mlir/Conversion/LLVMCommon/Pattern.h b/mlir/include/mlir/Conversion/LLVMCommon/Pattern.h
index 92f4025ffffffbf..aea6c38a441d3a5 100644
--- a/mlir/include/mlir/Conversion/LLVMCommon/Pattern.h
+++ b/mlir/include/mlir/Conversion/LLVMCommon/Pattern.h
@@ -89,7 +89,7 @@ class ConvertToLLVMPattern : public ConversionPattern {
   /// `strides[1]` = llvm.mlir.constant(1 : index) : i64
   /// `strides[0]` = `sizes[0]`
   /// %size        = llvm.mul `sizes[0]`, `sizes[1]` : i64
-  /// %nullptr     = llvm.mlir.null : !llvm.ptr<f32>
+  /// %nullptr     = llvm.mlir.zero : !llvm.ptr<f32>
   /// %gep         = llvm.getelementptr %nullptr[%size]
   ///                  : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
   /// `sizeBytes`  = llvm.ptrtoint %gep : !llvm.ptr<f32> to i64
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
index c0216d1971e58d2..e4486eb36e51a1f 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -1438,33 +1438,6 @@ def LLVM_LLVMFuncOp : LLVM_Op<"func", [
   let hasRegionVerifier = 1;
 }
 
-def LLVM_NullOp
-    : LLVM_Op<"mlir.null", [Pure]>,
-      LLVM_Builder<"$res = llvm::ConstantPointerNull::get("
-                   "    cast<llvm::PointerType>($_resultType));"> {
-  let summary = "Defines a value containing a null pointer to LLVM type.";
-  let description = [{
-    Unlike LLVM IR, MLIR does not have first-class null pointers. They must be
-    explicitly created as SSA values using `llvm.mlir.null`. This operation has
-    no operands or attributes, and returns a null value of a wrapped LLVM IR
-    pointer type.
-
-    Examples:
-
-    ```mlir
-    // Null pointer to i8.
-    %0 = llvm.mlir.null : !llvm.ptr<i8>
-
-    // Null pointer to a function with signature void().
-    %1 = llvm.mlir.null : !llvm.ptr<func<void ()>>
-    ```
-  }];
-
-  let results = (outs LLVM_AnyPointer:$res);
-  let builders = [LLVM_OneResultOpBuilder];
-  let assemblyFormat = "attr-dict `:` qualified(type($res))";
-}
-
 def LLVM_NoneTokenOp
     : LLVM_Op<"mlir.none", [Pure]> {
   let summary = "Defines a value containing an empty token to LLVM type.";
diff --git a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
index 1c3fbe8057c5bee..d9ea60a6749d926 100644
--- a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
+++ b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
@@ -421,7 +421,7 @@ class CoroIdOpConversion : public AsyncOpConversionPattern<CoroIdOp> {
     // Constants for initializing coroutine frame.
     auto constZero =
         rewriter.create<LLVM::ConstantOp>(loc, rewriter.getI32Type(), 0);
-    auto nullPtr = rewriter.create<LLVM::NullOp>(loc, ptrType);
+    auto nullPtr = rewriter.create<LLVM::ZeroOp>(loc, ptrType);
 
     // Get coroutine id: @llvm.coro.id.
     rewriter.replaceOpWithNewOp<LLVM::CoroIdOp>(
@@ -677,7 +677,7 @@ class RuntimeCreateOpLowering : public ConvertOpToLLVMPattern<RuntimeCreateOp> {
 
         // %Size = getelementptr %T* null, int 1
         // %SizeI = ptrtoint %T* %Size to i64
-        auto nullPtr = rewriter.create<LLVM::NullOp>(loc, storagePtrType);
+        auto nullPtr = rewriter.create<LLVM::ZeroOp>(loc, storagePtrType);
         auto gep =
             rewriter.create<LLVM::GEPOp>(loc, storagePtrType, storedType,
                                          nullPtr, ArrayRef<LLVM::GEPArg>{1});
diff --git a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
index eddf3e9a47d0bc8..b6d9ec6e013b445 100644
--- a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
+++ b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
@@ -1149,7 +1149,7 @@ LogicalResult ConvertLaunchFuncOpToGpuRuntimeCallPattern::matchAndRewrite(
           : adaptor.getAsyncDependencies().front();
   // Create array of pointers to kernel arguments.
   auto kernelParams = generateParamsArray(launchOp, adaptor, rewriter);
-  auto nullpointer = rewriter.create<LLVM::NullOp>(loc, llvmPointerPointerType);
+  auto nullpointer = rewriter.create<LLVM::ZeroOp>(loc, llvmPointerPointerType);
   Value dynamicSharedMemorySize = launchOp.getDynamicSharedMemorySize()
                                       ? launchOp.getDynamicSharedMemorySize()
                                       : zero;
@@ -1211,7 +1211,7 @@ LogicalResult ConvertMemcpyOpToGpuRuntimeCallPattern::matchAndRewrite(
   Value numElements = getNumElements(rewriter, loc, memRefType, srcDesc);
 
   Type elementPtrType = getElementPtrType(memRefType);
-  Value nullPtr = rewriter.create<LLVM::NullOp>(loc, elementPtrType);
+  Value nullPtr = rewriter.create<LLVM::ZeroOp>(loc, elementPtrType);
   Value gepPtr = rewriter.create<LLVM::GEPOp>(
       loc, elementPtrType,
       typeConverter->convertType(memRefType.getElementType()), nullPtr,
diff --git a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp
index e5519df9b0185ff..40d4c97975a6a94 100644
--- a/mlir/lib/Conversion/LLVMCommon/Pattern.cpp
+++ b/mlir/lib/Conversion/LLVMCommon/Pattern.cpp
@@ -162,7 +162,7 @@ void ConvertToLLVMPattern::getMemRefDescriptorSizes(
     // Buffer size in bytes.
     Type elementType = typeConverter->convertType(memRefType.getElementType());
     Type elementPtrType = getTypeConverter()->getPointerType(elementType);
-    Value nullPtr = rewriter.create<LLVM::NullOp>(loc, elementPtrType);
+    Value nullPtr = rewriter.create<LLVM::ZeroOp>(loc, elementPtrType);
     Value gepPtr = rewriter.create<LLVM::GEPOp>(
         loc, elementPtrType, elementType, nullPtr, runningStride);
     size = rewriter.create<LLVM::PtrToIntOp>(loc, getIndexType(), gepPtr);
@@ -180,7 +180,7 @@ Value ConvertToLLVMPattern::getSizeInBytes(
   // which is a common pattern of getting the size of a type in bytes.
   Type llvmType = typeConverter->convertType(type);
   auto convertedPtrType = getTypeConverter()->getPointerType(llvmType);
-  auto nullPtr = rewriter.create<LLVM::NullOp>(loc, convertedPtrType);
+  auto nullPtr = rewriter.create<LLVM::ZeroOp>(loc, convertedPtrType);
   auto gep = rewriter.create<LLVM::GEPOp>(loc, convertedPtrType, llvmType,
                                           nullPtr, ArrayRef<LLVM::GEPArg>{1});
   return rewriter.create<LLVM::PtrToIntOp>(loc, getIndexType(), gep);
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
index 1cf91bde28183ac..2e0b2582eee9629 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -195,12 +195,12 @@ OpFoldResult ICmpOp::fold(FoldAdaptor adaptor) {
                             getPredicate() == ICmpPredicate::eq);
 
   // cmpi(eq/ne, alloca, null) -> false/true
-  if (getLhs().getDefiningOp<AllocaOp>() && getRhs().getDefiningOp<NullOp>())
+  if (getLhs().getDefiningOp<AllocaOp>() && getRhs().getDefiningOp<ZeroOp>())
     return getBoolAttribute(getType(), getContext(),
                             getPredicate() == ICmpPredicate::ne);
 
   // cmpi(eq/ne, null, alloca) -> cmpi(eq/ne, alloca, null)
-  if (getLhs().getDefiningOp<NullOp>() && getRhs().getDefiningOp<AllocaOp>()) {
+  if (getLhs().getDefiningOp<ZeroOp>() && getRhs().getDefiningOp<AllocaOp>()) {
     Value lhs = getLhs();
     Value rhs = getRhs();
     getLhsMutable().assign(rhs);
@@ -1466,8 +1466,8 @@ LogicalResult LandingpadOp::verify() {
                << "global addresses expected as operand to "
                   "bitcast used in clauses for landingpad";
       }
-      // NullOp and AddressOfOp allowed
-      if (value.getDefiningOp<NullOp>())
+      // ZeroOp and AddressOfOp allowed
+      if (value.getDefiningOp<ZeroOp>())
         continue;
       if (value.getDefiningOp<AddressOfOp>())
         continue;
diff --git a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
index d75601e369a0d25..894382cd7f37baf 100644
--- a/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Transforms/SparseTensorConversion.cpp
@@ -299,7 +299,7 @@ class NewCallParams final {
     assert(isInitialized() && "Must initialize before genNewCall");
     StringRef name = "newSparseTensor";
     params[kParamAction] = constantAction(builder, loc, action);
-    params[kParamPtr] = ptr ? ptr : builder.create<LLVM::NullOp>(loc, pTp);
+    params[kParamPtr] = ptr ? ptr : builder.create<LLVM::ZeroOp>(loc, pTp);
     return createFuncCall(builder, loc, name, pTp, params, EmitCInterface::On)
         .getResult(0);
   }
diff --git a/mlir/lib/Target/LLVMIR/ModuleImport.cpp b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
index c6c30880d4f2c15..3d93332c4c56796 100644
--- a/mlir/lib/Target/LLVMIR/ModuleImport.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleImport.cpp
@@ -987,7 +987,7 @@ FailureOr<Value> ModuleImport::convertConstant(llvm::Constant *constant) {
   // Convert null pointer constants.
   if (auto *nullPtr = dyn_cast<llvm::ConstantPointerNull>(constant)) {
     Type type = convertType(nullPtr->getType());
-    return builder.create<NullOp>(loc, type).getResult();
+    return builder.create<ZeroOp>(loc, type).getResult();
   }
 
   // Convert none token constants.
diff --git a/mlir/test/Conversion/AsyncToLLVM/convert-coro-to-llvm.mlir b/mlir/test/Conversion/AsyncToLLVM/convert-coro-to-llvm.mlir
index fb15a2da2836b07..8a611cf96f5b5f8 100644
--- a/mlir/test/Conversion/AsyncToLLVM/convert-coro-to-llvm.mlir
+++ b/mlir/test/Conversion/AsyncToLLVM/convert-coro-to-llvm.mlir
@@ -3,7 +3,7 @@
 // CHECK-LABEL: @coro_id
 func.func @coro_id() {
   // CHECK: %0 = llvm.mlir.constant(0 : i32) : i32
-  // CHECK: %1 = llvm.mlir.null : !llvm.ptr
+  // CHECK: %1 = llvm.mlir.zero : !llvm.ptr
   // CHECK: %2 = llvm.intr.coro.id %0, %1, %1, %1 : (i32, !llvm.ptr, !llvm.ptr, !llvm.ptr) -> !llvm.token
   %0 = async.coro.id
   return
diff --git a/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir b/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir
index 7ff5a2c4a490d66..3672be91bbc07ad 100644
--- a/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir
+++ b/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir
@@ -9,7 +9,7 @@ func.func @create_token() {
 
 // CHECK-LABEL: @create_value
 func.func @create_value() {
-  // CHECK: %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr
+  // CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr
   // CHECK: %[[OFFSET:.*]] = llvm.getelementptr %[[NULL]][1]
   // CHECK: %[[SIZE:.*]] = llvm.ptrtoint %[[OFFSET]]
   // CHECK: %[[VALUE:.*]] = call @mlirAsyncRuntimeCreateValue(%[[SIZE]])
diff --git a/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir b/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
index 2cdc4e8dbb1ad67..b7bcbc1262d82a8 100644
--- a/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
+++ b/mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir
@@ -52,7 +52,7 @@ module attributes {gpu.container_module} {
   // CHECK: llvm.getelementptr %[[MEMREF]][0, 4] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct[[STRUCT_BODY:<.*>]]
   // CHECK: llvm.getelementptr %[[MEMREF]][0, 5] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct[[STRUCT_BODY:<.*>]]
 
-  // CHECK: [[EXTRA_PARAMS:%.*]] = llvm.mlir.null : !llvm.ptr
+  // CHECK: [[EXTRA_PARAMS:%.*]] = llvm.mlir.zero : !llvm.ptr
 
   // CHECK: llvm.call @mgpuLaunchKernel([[FUNC]], [[C8]], [[C8]], [[C8]],
   // CHECK-SAME: [[C8]], [[C8]], [[C8]], [[C256]], [[STREAM]],
diff --git a/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir b/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir
index c77bf238c8ca212..f6da6d0a3be3476 100644
--- a/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir
+++ b/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir
@@ -21,7 +21,7 @@ module attributes {gpu.container_module} {
   llvm.func @malloc(i64) -> !llvm.ptr
   llvm.func @foo() {
     %0 = llvm.mlir.constant(12 : index) : i64
-    %1 = llvm.mlir.null : !llvm.ptr
+    %1 = llvm.mlir.zero : !llvm.ptr
     %2 = llvm.mlir.constant(1 : index) : i64
     %3 = llvm.getelementptr %1[%2] : (!llvm.ptr, i64) -> !llvm.ptr, f32
     %4 = llvm.ptrtoint %3 : !llvm.ptr to i64
diff --git a/mlir/test/Conversion/GPUToVulkan/typed-pointers.mlir b/mlir/test/Conversion/GPUToVulkan/typed-pointers.mlir
index 67bd640e5d44c88..2884b33750b32ce 100644
--- a/mlir/test/Conversion/GPUToVulkan/typed-pointers.mlir
+++ b/mlir/test/Conversion/GPUToVulkan/typed-pointers.mlir
@@ -21,7 +21,7 @@ module attributes {gpu.container_module} {
   llvm.func @malloc(i64) -> !llvm.ptr<i8>
   llvm.func @foo() {
     %0 = llvm.mlir.constant(12 : index) : i64
-    %1 = llvm.mlir.null : !llvm.ptr<f32>
+    %1 = llvm.mlir.zero : !llvm.ptr<f32>
     %2 = llvm.mlir.constant(1 : index) : i64
     %3 = llvm.getelementptr %1[%2] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
     %4 = llvm.ptrtoint %3 : !llvm.ptr<f32> to i64
diff --git a/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir b/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir
index 447875506b1edf9..ea6a235857e6362 100644
--- a/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir
@@ -11,7 +11,7 @@ func.func @mixed_alloc(%arg0: index, %arg1: index) -> memref<?x42x?xf32> {
 //  CHECK-NEXT:  %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
 //  CHECK-NEXT:  %[[st0:.*]] = llvm.mul %[[N]], %[[c42]] : i64
 //  CHECK-NEXT:  %[[sz:.*]] = llvm.mul %[[st0]], %[[M]] : i64
-//  CHECK-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr
+//  CHECK-NEXT:  %[[null:.*]] = llvm.mlir.zero : !llvm.ptr
 //  CHECK-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
 //  CHECK-NEXT:  %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64
 //  CHECK-NEXT:  llvm.call @malloc(%[[sz_bytes]]) : (i64) -> !llvm.ptr
@@ -60,7 +60,7 @@ func.func @dynamic_alloc(%arg0: index, %arg1: index) -> memref<?x?xf32> {
 //   CHECK-DAG:  %[[N:.*]] = builtin.unrealized_conversion_cast %[[Narg]]
 //  CHECK-NEXT:  %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
 //  CHECK-NEXT:  %[[sz:.*]] = llvm.mul %[[N]], %[[M]] : i64
-//  CHECK-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr
+//  CHECK-NEXT:  %[[null:.*]] = llvm.mlir.zero : !llvm.ptr
 //  CHECK-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
 //  CHECK-NEXT:  %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64
 //  CHECK-NEXT:  llvm.call @malloc(%[[sz_bytes]]) : (i64) -> !llvm.ptr
@@ -128,7 +128,7 @@ func.func @stdlib_aligned_alloc(%N : index) -> memref<32x18xf32> {
 // ALIGNED-ALLOC-NEXT:  %[[sz2:.*]] = llvm.mlir.constant(18 : index) : i64
 // ALIGNED-ALLOC-NEXT:  %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
 // ALIGNED-ALLOC-NEXT:  %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64
-// ALIGNED-ALLOC-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr
+// ALIGNED-ALLOC-NEXT:  %[[null:.*]] = llvm.mlir.zero : !llvm.ptr
 // ALIGNED-ALLOC-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
 // ALIGNED-ALLOC-NEXT:  %[[bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64
 // ALIGNED-ALLOC-NEXT:  %[[alignment:.*]] = llvm.mlir.constant(32 : index) : i64
@@ -570,7 +570,7 @@ func.func @memref_reshape(%input : memref<2x3xf32>, %shape : memref<?xindex>) {
 // ALIGNED-ALLOC-LABEL: @memref_of_memref
 func.func @memref_of_memref() {
   // Sizeof computation is as usual.
-  // ALIGNED-ALLOC: %[[NULL:.*]] = llvm.mlir.null
+  // ALIGNED-ALLOC: %[[NULL:.*]] = llvm.mlir.zero
   // ALIGNED-ALLOC: %[[PTR:.*]] = llvm.getelementptr
   // ALIGNED-ALLOC: %[[SIZEOF:.*]] = llvm.ptrtoint
 
@@ -592,7 +592,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<index, 32>> } {
   // ALIGNED-ALLOC-LABEL: @memref_of_memref_32
   func.func @memref_of_memref_32() {
     // Sizeof computation is as usual.
-    // ALIGNED-ALLOC: %[[NULL:.*]] = llvm.mlir.null
+    // ALIGNED-ALLOC: %[[NULL:.*]] = llvm.mlir.zero
     // ALIGNED-ALLOC: %[[PTR:.*]] = llvm.getelementptr
     // ALIGNED-ALLOC: %[[SIZEOF:.*]] = llvm.ptrtoint
 
@@ -615,7 +615,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<index, 32>> } {
 // ALIGNED-ALLOC-LABEL: @memref_of_memref_of_memref
 func.func @memref_of_memref_of_memref() {
   // Sizeof computation is as usual, also check the type.
-  // ALIGNED-ALLOC: %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr
+  // ALIGNED-ALLOC: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr
   // ALIGNED-ALLOC: %[[PTR:.*]] = llvm.getelementptr
   // ALIGNED-ALLOC: %[[SIZEOF:.*]] = llvm.ptrtoint
 
@@ -631,7 +631,7 @@ func.func @memref_of_memref_of_memref() {
 
 // ALIGNED-ALLOC-LABEL: @ranked_unranked
 func.func @ranked_unranked() {
-  // ALIGNED-ALLOC: llvm.mlir.null
+  // ALIGNED-ALLOC: llvm.mlir.zero
   // ALIGNED-ALLOC-SAME: !llvm.ptr
   // ALIGNED-ALLOC: llvm.getelementptr
   // ALIGNED-ALLOC: llvm.ptrtoint
diff --git a/mlir/test/Conversion/MemRefToLLVM/convert-static-memref-ops.mlir b/mlir/test/Conversion/MemRefToLLVM/convert-static-memref-ops.mlir
index 241a868f0ee0582..35a6358d8f58b08 100644
--- a/mlir/test/Conversion/MemRefToLLVM/convert-static-memref-ops.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/convert-static-memref-ops.mlir
@@ -3,7 +3,7 @@
 // CHECK-LABEL: func @zero_d_alloc()
 func.func @zero_d_alloc() -> memref<f32> {
 // CHECK: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
-// CHECK: %[[null:.*]] = llvm.mlir.null : !llvm.ptr
+// CHECK: %[[null:.*]] = llvm.mlir.zero : !llvm.ptr
 // CHECK: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[one]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
 // CHECK: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64
 // CHECK: %[[ptr:.*]] = llvm.call @malloc(%[[size_bytes]]) : (i64) -> !llvm.ptr
@@ -36,7 +36,7 @@ func.func @zero_d_dealloc(%arg0: memref<f32>) {
 func.func @aligned_1d_alloc() -> memref<42xf32> {
 // CHECK: %[[sz1:.*]] = llvm.mlir.constant(42 : index) : i64
 // CHECK: %[[st1:.*]] = llvm.mlir.constant(1 : index) : i64
-// CHECK: %[[null:.*]] = llvm.mlir.null : !llvm.ptr
+// CHECK: %[[null:.*]] = llvm.mlir.zero : !llvm.ptr
 // CHECK: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz1]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
 // CHECK: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64
 // CHECK: %[[alignment:.*]] = llvm.mlir.constant(8 : index) : i64
@@ -63,7 +63,7 @@ func.func @aligned_1d_alloc() -> memref<42xf32> {
 // CHECK-LABEL: func @static_alloc()
 func.func @static_alloc() -> memref<32x18xf32> {
 // CHECK: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64
-// CHECK: %[[null:.*]] = llvm.mlir.null : !llvm.ptr
+// CHECK: %[[null:.*]] = llvm.mlir.zero : !llvm.ptr
 // CHECK: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
 // CHECK: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64
 // CHECK: llvm.call @malloc(%[[size_bytes]]) : (i64) -> !llvm.ptr
@@ -207,7 +207,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<index, 32>> } {
     %0 = memref.alloc(%c1) : memref<? x vector<2xf32>>
     // CHECK: %[[CST_S:.*]] = arith.constant 1 : index
     // CHECK: %[[CST:.*]] = builtin.unrealized_conversion_cast
-    // CHECK: llvm.mlir.null
+    // CHECK: llvm.mlir.zero
     // CHECK: llvm.getelementptr %{{.*}}[[CST]]
     // CHECK: llvm.ptrtoint %{{.*}} : !llvm.ptr to i32
     // CHECK: llvm.ptrtoint %{{.*}} : !llvm.ptr to i32
diff --git a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
index 9e44029ad93bd9c..ae487ef6694745d 100644
--- a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
@@ -464,7 +464,7 @@ func.func @memref_copy_ranked() {
   // CHECK: [[ONE:%.*]] = llvm.mlir.constant(1 : index) : i64
   // CHECK: [[EXTRACT0:%.*]] = llvm.extractvalue {{%.*}}[3, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
   // CHECK: [[MUL:%.*]] = llvm.mul [[ONE]], [[EXTRACT0]] : i64
-  // CHECK: [[NULL:%.*]] = llvm.mlir.null : !llvm.ptr
+  // CHECK: [[NULL:%.*]] = llvm.mlir.zero : !llvm.ptr
   // CHECK: [[GEP:%.*]] = llvm.getelementptr [[NULL]][1] : (!llvm.ptr) -> !llvm.ptr, f32
   // CHECK: [[PTRTOINT:%.*]] = llvm.ptrtoint [[GEP]] : !llvm.ptr to i64
   // CHECK: [[SIZE:%.*]] = llvm.mul [[MUL]], [[PTRTOINT]] : i64
@@ -495,7 +495,7 @@ func.func @memref_copy_contiguous(%in: memref<16x4xi32>, %offset: index) {
   // CHECK: [[MUL1:%.*]] = llvm.mul {{.*}}, [[EXTRACT0]] : i64
   // CHECK: [[EXTRACT1:%.*]] = llvm.extractvalue %[[DESC]][3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
   // CHECK: [[MUL2:%.*]] = llvm.mul [[MUL1]], [[EXTRACT1]] : i64
-  // CHECK: [[NULL:%.*]] = llvm.mlir.null : !llvm.ptr
+  // CHECK: [[NULL:%.*]] = llvm.mlir.zero : !llvm.ptr
   // CHECK: [[GEP:%.*]] = llvm.getelementptr [[NULL]][1] : (!llvm.ptr) -> !llvm.ptr, i32
   // CHECK: [[PTRTOINT:%.*]] = llvm.ptrtoint [[GEP]] : !llvm.ptr to i64
   // CHECK: [[SIZE:%.*]] = llvm.mul [[MUL2]], [[PTRTOINT]] : i64
@@ -610,7 +610,7 @@ func.func @extract_strided_metadata(
 // -----
 
 // CHECK-LABEL: func @load_non_temporal(
-func.func @load_non_temporal(%arg0 : memref<32xf32, affine_map<(d0) -> (d0)>>) {  
+func.func @load_non_temporal(%arg0 : memref<32xf32, affine_map<(d0) -> (d0)>>) {
   %1 = arith.constant 7 : index
   // CHECK: llvm.load %{{.*}} {nontemporal} : !llvm.ptr -> f32
   %2 = memref.load %arg0[%1] {nontemporal = true} : memref<32xf32, affine_map<(d0) -> (d0)>>
diff --git a/mlir/test/Conversion/MemRefToLLVM/typed-pointers.mlir b/mlir/test/Conversion/MemRefToLLVM/typed-pointers.mlir
index 893c359b7b0718e..19d053ee7813bcb 100644
--- a/mlir/test/Conversion/MemRefToLLVM/typed-pointers.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/typed-pointers.mlir
@@ -100,7 +100,7 @@ func.func @mixed_alloc(%arg0: index, %arg1: index) -> memref<?x42x?xf32> {
 //  CHECK-NEXT:  %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
 //  CHECK-NEXT:  %[[st0:.*]] = llvm.mul %[[N]], %[[c42]] : i64
 //  CHECK-NEXT:  %[[sz:.*]] = llvm.mul %[[st0]], %[[M]] : i64
-//  CHECK-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<f32>
+//  CHECK-NEXT:  %[[null:.*]] = llvm.mlir.zero : !llvm.ptr<f32>
 //  CHECK-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
 //  CHECK-NEXT:  %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<f32> to i64
 //  CHECK-NEXT:  llvm.call @malloc(%[[sz_bytes]]) : (i64) -> !llvm.ptr<i8>
@@ -140,7 +140,7 @@ func.func @dynamic_alloc(%arg0: index, %arg1: index) -> memref<?x?xf32> {
 //   CHECK-DAG:  %[[N:.*]] = builtin.unrealized_conversion_cast %[[Narg]]
 //  CHECK-NEXT:  %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
 //  CHECK-NEXT:  %[[sz:.*]] = llvm.mul %[[N]], %[[M]] : i64
-//  CHECK-NEXT:  %[[null:.*]] = llvm.mlir.null : !llvm.ptr<f32>
+//  CHECK-NEXT:  %[[null:.*]] = llvm.mlir.zero : !llvm.ptr<f32>
 //  CHECK-NEXT:  %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr<f32>, i64) -> !llvm.ptr<f32>
 //  CHECK-NEXT:  %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr<f32> to i64
 //  CHECK-NEXT:  llvm.call @malloc(%[[sz_bytes]]) : (i64) -> !llvm.ptr<i8>
diff --git a/mlir/test/Dialect/GPU/ops.mlir b/mlir/test/Dialect/GPU/ops.mlir
index 0d2f52e8adbfcda..c638e0b21ab6f1f 100644
--- a/mlir/test/Dialect/GPU/ops.mlir
+++ b/mlir/test/Dialect/GPU/ops.mlir
@@ -147,7 +147,7 @@ module attributes {gpu.container_module} {
     %cstI64 = arith.constant 8 : i64
     %c0 = arith.constant 0 : i32
     %t0 = gpu.wait async
-    %lowStream = llvm.mlir.null : !llvm.ptr
+    %lowStream = llvm.mlir.zero : !llvm.ptr
 
     // CHECK: gpu.launch_func @kernels::@kernel_1 blocks in (%{{.*}}, %{{.*}}, %{{.*}}) threads in (%{{.*}}, %{{.*}}, %{{.*}}) args(%{{.*}} : f32, %{{.*}} : memref<?xf32, 1>)
     gpu.launch_func @kernels::@kernel_1 blocks in (%cst, %cst, %cst) threads in (%cst, %cst, %cst) args(%0 : f32, %1 : memref<?xf32, 1>)
diff --git a/mlir/test/Dialect/LLVMIR/callgraph.mlir b/mlir/test/Dialect/LLVMIR/callgraph.mlir
index ca1044b8288c456..5be0bc6252624e3 100644
--- a/mlir/test/Dialect/LLVMIR/callgraph.mlir
+++ b/mlir/test/Dialect/LLVMIR/callgraph.mlir
@@ -67,8 +67,8 @@ module attributes {"test.name" = "Invoke call"} {
     %0 = llvm.mlir.constant(0 : i32) : i32
     %1 = llvm.mlir.constant(3 : i32) : i32
     %2 = llvm.mlir.constant("\01") : !llvm.array<1 x i8>
-    %3 = llvm.mlir.null : !llvm.ptr
-    %4 = llvm.mlir.null : !llvm.ptr
+    %3 = llvm.mlir.zero : !llvm.ptr
+    %4 = llvm.mlir.zero : !llvm.ptr
     %5 = llvm.mlir.addressof @_ZTIi : !llvm.ptr
     %6 = llvm.mlir.constant(1 : i32) : i32
     %7 = llvm.alloca %6 x i8 : (i32) -> !llvm.ptr
diff --git a/mlir/test/Dialect/LLVMIR/canonicalize.mlir b/mlir/test/Dialect/LLVMIR/canonicalize.mlir
index ed7efabb44b1ac3..5e26fa37b681d71 100644
--- a/mlir/test/Dialect/LLVMIR/canonicalize.mlir
+++ b/mlir/test/Dialect/LLVMIR/canonicalize.mlir
@@ -19,7 +19,7 @@ llvm.func @fold_icmp_ne(%arg0 : vector<2xi32>) -> vector<2xi1> {
 // CHECK-LABEL: @fold_icmp_alloca
 llvm.func @fold_icmp_alloca() -> i1 {
   // CHECK: %[[C0:.*]] = llvm.mlir.constant(true) : i1
-  %c0 = llvm.mlir.null : !llvm.ptr
+  %c0 = llvm.mlir.zero : !llvm.ptr
   %c1 = arith.constant 1 : i64
   %0 = llvm.alloca %c1 x i32 : (i64) -> !llvm.ptr
   %1 = llvm.icmp "ne" %c0, %0 : !llvm.ptr
diff --git a/mlir/test/Dialect/LLVMIR/invalid.mlir b/mlir/test/Dialect/LLVMIR/invalid.mlir
index fc959009ff20c81..3db07f45a5babdc 100644
--- a/mlir/test/Dialect/LLVMIR/invalid.mlir
+++ b/mlir/test/Dialect/LLVMIR/invalid.mlir
@@ -545,9 +545,9 @@ func.func @invalid_vector_type_5(%a : vector<4xf32>, %idx : i32) -> vector<4xf32
 
 // -----
 
-func.func @null_non_llvm_type() {
-  // expected-error@+1 {{'llvm.mlir.null' op result #0 must be LLVM pointer type, but got 'i32'}}
-  llvm.mlir.null : i32
+func.func @zero_non_llvm_type() {
+  // expected-error@+1 {{'llvm.mlir.zero' op result #0 must be LLVM dialect-compatible type, but got 'tensor<4xi32>'}}
+  llvm.mlir.zero : tensor<4xi32>
 }
 
 // -----
diff --git a/mlir/test/Dialect/LLVMIR/mem2reg.mlir b/mlir/test/Dialect/LLVMIR/mem2reg.mlir
index fc696c5073c3033..30ba459d07a49f3 100644
--- a/mlir/test/Dialect/LLVMIR/mem2reg.mlir
+++ b/mlir/test/Dialect/LLVMIR/mem2reg.mlir
@@ -16,7 +16,7 @@ llvm.func @default_value() -> i32 {
 llvm.func @store_of_ptr() {
   %0 = llvm.mlir.constant(1 : i32) : i32
   %1 = llvm.mlir.constant(4 : i32) : i32
-  %2 = llvm.mlir.null : !llvm.ptr
+  %2 = llvm.mlir.zero : !llvm.ptr
   // CHECK: %[[ALLOCA:.*]] = llvm.alloca
   %3 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
   // CHECK: llvm.store %{{.*}}, %[[ALLOCA]]
@@ -55,7 +55,7 @@ llvm.func @unreachable_in_loop() -> i32 {
   llvm.store %1, %3 {alignment = 4 : i64} : i32, !llvm.ptr
   // CHECK: llvm.br ^[[LOOP:.*]]
   llvm.br ^bb1
-  
+
 // CHECK: ^[[LOOP]]:
 ^bb1:  // 2 preds: ^bb0, ^bb3
   // CHECK-NEXT: llvm.br ^[[ENDOFLOOP:.*]]
@@ -66,7 +66,7 @@ llvm.func @unreachable_in_loop() -> i32 {
 ^bb2:  // no predecessors
   // CHECK-NEXT: llvm.br ^[[ENDOFLOOP]]
   llvm.br ^bb3
-  
+
 // CHECK: ^[[ENDOFLOOP]]:
 ^bb3:  // 2 preds: ^bb1, ^bb2
   // CHECK-NEXT: llvm.br ^[[LOOP]]
@@ -421,8 +421,8 @@ llvm.func @ignore_discardable_tree() {
   %1 = llvm.mlir.constant(0 : i16) : i16
   %2 = llvm.mlir.constant(0 : i8) : i8
   %3 = llvm.mlir.undef : !llvm.struct<(i8, i16)>
-  %4 = llvm.insertvalue %2, %3[0] : !llvm.struct<(i8, i16)> 
-  %5 = llvm.insertvalue %1, %4[1] : !llvm.struct<(i8, i16)> 
+  %4 = llvm.insertvalue %2, %3[0] : !llvm.struct<(i8, i16)>
+  %5 = llvm.insertvalue %1, %4[1] : !llvm.struct<(i8, i16)>
   %6 = llvm.alloca %0 x !llvm.struct<(i8, i16)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
   %7 = llvm.getelementptr %6[0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i8, i16)>
   llvm.intr.lifetime.start 2, %7 : !llvm.ptr
diff --git a/mlir/test/Dialect/LLVMIR/roundtrip-typed-pointers.mlir b/mlir/test/Dialect/LLVMIR/roundtrip-typed-pointers.mlir
index 7cc5a6deee54171..b1d72b690595c31 100644
--- a/mlir/test/Dialect/LLVMIR/roundtrip-typed-pointers.mlir
+++ b/mlir/test/Dialect/LLVMIR/roundtrip-typed-pointers.mlir
@@ -39,10 +39,10 @@ func.func @alloca(%size : i64) {
 
 // CHECK-LABEL: @null
 func.func @null() {
-  // CHECK: llvm.mlir.null : !llvm.ptr<i8>
-  %0 = llvm.mlir.null : !llvm.ptr<i8>
-  // CHECK: llvm.mlir.null : !llvm.ptr<struct<(ptr<func<void (i32, ptr<func<void ()>>)>>, i64)>>
-  %1 = llvm.mlir.null : !llvm.ptr<struct<(ptr<func<void (i32, ptr<func<void ()>>)>>, i64)>>
+  // CHECK: llvm.mlir.zero : !llvm.ptr<i8>
+  %0 = llvm.mlir.zero : !llvm.ptr<i8>
+  // CHECK: llvm.mlir.zero : !llvm.ptr<struct<(ptr<func<void (i32, ptr<func<void ()>>)>>, i64)>>
+  %1 = llvm.mlir.zero : !llvm.ptr<struct<(ptr<func<void (i32, ptr<func<void ()>>)>>, i64)>>
   llvm.return
 }
 
diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
index 558ed3058fe7585..654048d873234d9 100644
--- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir
+++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
@@ -329,8 +329,8 @@ func.func @alloca(%size : i64) {
 
 // CHECK-LABEL: @null
 func.func @null() {
-  // CHECK: llvm.mlir.null : !llvm.ptr
-  %0 = llvm.mlir.null : !llvm.ptr
+  // CHECK: llvm.mlir.zero : !llvm.ptr
+  %0 = llvm.mlir.zero : !llvm.ptr
   llvm.return
 }
 
@@ -386,7 +386,7 @@ llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personali
 // CHECK: %[[V0:.*]] = llvm.mlir.constant(0 : i32) : i32
 // CHECK: %{{.*}} = llvm.mlir.constant(3 : i32) : i32
 // CHECK: %[[V1:.*]] = llvm.mlir.constant("\01") : !llvm.array<1 x i8>
-// CHECK: %[[V2:.*]] = llvm.mlir.null : !llvm.ptr
+// CHECK: %[[V2:.*]] = llvm.mlir.zero : !llvm.ptr
 // CHECK: %[[V3:.*]] = llvm.mlir.addressof @_ZTIi : !llvm.ptr
 // CHECK: %[[V4:.*]] = llvm.mlir.constant(1 : i32) : i32
 // CHECK: %[[V5:.*]] = llvm.alloca %[[V4]] x i8 : (i32) -> !llvm.ptr
@@ -394,7 +394,7 @@ llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personali
   %0 = llvm.mlir.constant(0 : i32) : i32
   %1 = llvm.mlir.constant(3 : i32) : i32
   %2 = llvm.mlir.constant("\01") : !llvm.array<1 x i8>
-  %3 = llvm.mlir.null : !llvm.ptr
+  %3 = llvm.mlir.zero : !llvm.ptr
   %4 = llvm.mlir.addressof @_ZTIi : !llvm.ptr
   %5 = llvm.mlir.constant(1 : i32) : i32
   %6 = llvm.alloca %5 x i8 : (i32) -> !llvm.ptr
diff --git a/mlir/test/Dialect/SparseTensor/conversion.mlir b/mlir/test/Dialect/SparseTensor/conversion.mlir
index 9cc5cc01544ccbe..9d337b929fa423a 100644
--- a/mlir/test/Dialect/SparseTensor/conversion.mlir
+++ b/mlir/test/Dialect/SparseTensor/conversion.mlir
@@ -146,7 +146,7 @@ func.func @sparse_new3d(%arg0: !llvm.ptr<i8>) -> tensor<?x?x?xf32, #SparseTensor
 //   CHECK-DAG: %[[Iota:.*]] = memref.cast %[[Iota0]] : memref<2xindex> to memref<?xindex>
 //   CHECK-DAG: memref.store %[[I]], %[[DimSizes0]][%[[C0]]] : memref<2xindex>
 //   CHECK-DAG: memref.store %[[J]], %[[DimSizes0]][%[[C1]]] : memref<2xindex>
-//       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
+//       CHECK: %[[NP:.*]] = llvm.mlir.zero : !llvm.ptr<i8>
 //       CHECK: %[[T:.*]] = call @newSparseTensor(%[[DimSizes]], %[[LvlSizes]], %[[LvlTypes]], %[[Iota]], %[[Iota]], %{{.*}}, %{{.*}}, %{{.*}}, %[[Empty]], %[[NP]])
 //       CHECK: return %[[T]] : !llvm.ptr<i8>
 func.func @sparse_init(%arg0: index, %arg1: index) -> tensor<?x?xf64, #CSR> {
diff --git a/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir b/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir
index 9fb1946d56263db..2e361e940f8e151 100644
--- a/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir
+++ b/mlir/test/Dialect/SparseTensor/convert_dense2sparse.mlir
@@ -34,7 +34,7 @@
 //   CHECK-DAG: %[[DimSizesP:.*]] = memref.cast %[[DimSizes]] : memref<1xindex> to memref<?xindex>
 //   CHECK-DAG: %[[LvlSizesP:.*]] = memref.cast %[[LvlSizes]] : memref<1xindex> to memref<?xindex>
 //   CHECK-DAG: %[[IotaP:.*]] = memref.cast %[[Iota]] : memref<1xindex> to memref<?xindex>
-//       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
+//       CHECK: %[[NP:.*]] = llvm.mlir.zero : !llvm.ptr<i8>
 //       CHECK: %[[C:.*]] = call @newSparseTensor(%[[DimSizesP]], %[[LvlSizesP]], %[[LvlTypesP]], %[[IotaP]], %[[IotaP]], %{{.*}}, %{{.*}}, %{{.*}}, %[[EmptyCOO]], %[[NP]])
 //       CHECK: %[[M:.*]] = memref.alloca() : memref<1xindex>
 //       CHECK: %[[T:.*]] = memref.cast %[[M]] : memref<1xindex> to memref<?xindex>
@@ -92,7 +92,7 @@ func.func @sparse_convert_complex(%arg0: tensor<100xcomplex<f64>>) -> tensor<100
 //   CHECK-DAG: %[[DimSizesP:.*]] = memref.cast %[[DimSizes]] : memref<2xindex> to memref<?xindex>
 //   CHECK-DAG: %[[LvlSizesP:.*]] = memref.cast %[[LvlSizes]] : memref<2xindex> to memref<?xindex>
 //   CHECK-DAG: %[[IotaP:.*]] = memref.cast %[[Iota]] : memref<2xindex> to memref<?xindex>
-//       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
+//       CHECK: %[[NP:.*]] = llvm.mlir.zero : !llvm.ptr<i8>
 //       CHECK: %[[C:.*]] = call @newSparseTensor(%[[DimSizesP]], %[[LvlSizesP]], %[[LvlTypesP]], %[[IotaP]], %[[IotaP]], %{{.*}}, %{{.*}}, %{{.*}}, %[[EmptyCOO]], %[[NP]])
 //       CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex>
 //       CHECK: %[[T:.*]] = memref.cast %[[M]] : memref<2xindex> to memref<?xindex>
@@ -146,7 +146,7 @@ func.func @sparse_convert_2d(%arg0: tensor<2x4xf64>) -> tensor<2x4xf64, #CSR> {
 //   CHECK-DAG: %[[DimSizesP:.*]] = memref.cast %[[DimSizes]] : memref<2xindex> to memref<?xindex>
 //   CHECK-DAG: %[[LvlSizesP:.*]] = memref.cast %[[LvlSizes]] : memref<2xindex> to memref<?xindex>
 //   CHECK-DAG: %[[IotaP:.*]] = memref.cast %[[Iota]] : memref<2xindex> to memref<?xindex>
-//       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
+//       CHECK: %[[NP:.*]] = llvm.mlir.zero : !llvm.ptr<i8>
 //       CHECK: %[[C:.*]] = call @newSparseTensor(%[[DimSizesP]], %[[LvlSizesP]], %[[LvlTypesP]], %[[IotaP]], %[[IotaP]], %{{.*}}, %{{.*}}, %{{.*}}, %[[EmptyCOO]], %[[NP]])
 //       CHECK: %[[M:.*]] = memref.alloca() : memref<2xindex>
 //       CHECK: %[[N:.*]] = memref.cast %[[M]] : memref<2xindex> to memref<?xindex>
@@ -220,7 +220,7 @@ func.func @sparse_constant_csc() -> tensor<8x7xf32, #CSC>{
 //   CHECK-DAG: %[[LvlSizesP:.*]] = memref.cast %[[LvlSizes]] : memref<3xindex> to memref<?xindex>
 //   CHECK-DAG: %[[Lvl2DimP:.*]] = memref.cast %[[Lvl2Dim]] : memref<3xindex> to memref<?xindex>
 //   CHECK-DAG: %[[Dim2LvlP:.*]] = memref.cast %[[Dim2Lvl]] : memref<3xindex> to memref<?xindex>
-//       CHECK: %[[NP:.*]] = llvm.mlir.null : !llvm.ptr<i8>
+//       CHECK: %[[NP:.*]] = llvm.mlir.zero : !llvm.ptr<i8>
 //       CHECK: %[[C:.*]] = call @newSparseTensor(%[[DimSizesP]], %[[LvlSizesP]], %[[LvlTypesP]], %[[Lvl2DimP]], %[[Dim2LvlP]], %{{.*}}, %{{.*}}, %{{.*}}, %[[EmptyCOO]], %[[NP]])
 //       CHECK: %[[M:.*]] = memref.alloca() : memref<3xindex>
 //       CHECK: %[[N:.*]] = memref.cast %[[M]] : memref<3xindex> to memref<?xindex>
diff --git a/mlir/test/Dialect/SparseTensor/sparse_concat.mlir b/mlir/test/Dialect/SparseTensor/sparse_concat.mlir
index 5f412e59dba9f87..a24315755944320 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_concat.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_concat.mlir
@@ -99,7 +99,7 @@ func.func @concat_mix_dense(%arg0: tensor<2x4xf64>, %arg1: tensor<3x4xf64, #Spar
 // CHECK-DAG:     %[[IotaP_0:.*]] = memref.cast %[[Iota_0]] : memref<2xindex> to memref<?xindex>
 // CHECK-DAG:     memref.store %[[TMP_c0]], %[[Iota_0]][%[[TMP_c0]]] : memref<2xindex>
 // CHECK-DAG:     memref.store %[[TMP_c1]], %[[Iota_0]][%[[TMP_c1]]] : memref<2xindex>
-// CHECK-DAG:     %[[NullPtr:.*]] = llvm.mlir.null : !llvm.ptr<i8>
+// CHECK-DAG:     %[[NullPtr:.*]] = llvm.mlir.zero : !llvm.ptr<i8>
 // CHECK:         %[[TMP_7:.*]] = call @newSparseTensor(%[[DimSizesP_0]], %[[LvlSizesP_0]], %[[LvlTypesP_0]], %[[IotaP_0]], %[[IotaP_0]], %[[TMP_c0_i32]], %[[TMP_c0_i32]], %[[TMP_c1_i32]], %[[TMP_c4_i32]], %[[NullPtr]])
 // CHECK:         %[[TMP_9:.*]] = memref.alloca() : memref<2xindex>
 // CHECK:         %[[TMP_10:.*]] = memref.cast %[[TMP_9]] : memref<2xindex> to memref<?xindex>
@@ -189,7 +189,7 @@ func.func @concat_mix_sparse(%arg0: tensor<2x4xf64>, %arg1: tensor<3x4xf64, #Spa
 // CHECK-DAG:     %[[Dim2LvlP_0:.*]] = memref.cast %[[Dim2Lvl_0]] : memref<2xindex> to memref<?xindex>
 // CHECK-DAG:     memref.store %[[TMP_c1]], %[[Dim2Lvl_0]][%[[TMP_c0]]] : memref<2xindex>
 // CHECK-DAG:     memref.store %[[TMP_c0]], %[[Dim2Lvl_0]][%[[TMP_c1]]] : memref<2xindex>
-// CHECK-DAG:     %[[NullPtr:.*]] = llvm.mlir.null : !llvm.ptr<i8>
+// CHECK-DAG:     %[[NullPtr:.*]] = llvm.mlir.zero : !llvm.ptr<i8>
 // CHECK:         %[[TMP_7:.*]] = call @newSparseTensor(%[[DimSizesP_0]], %[[LvlSizesP_0]], %[[LvlTypesP_0]], %[[Lvl2DimP_0]], %[[Dim2LvlP_0]], %[[TMP_c0_i32]], %[[TMP_c0_i32]], %[[TMP_c1_i32]], %[[TMP_c4_i32]], %[[NullPtr]])
 // CHECK:         %[[TMP_9:.*]] = memref.alloca() : memref<2xindex>
 // CHECK:         %[[TMP_10:.*]] = memref.cast %[[TMP_9]] : memref<2xindex> to memref<?xindex>
@@ -404,7 +404,7 @@ func.func @concat_mix_dense_perm_dim1_dyn(%arg0: tensor<3x2xf64>, %arg1: tensor<
 // CHECK-DAG:     %[[Dim2LvlP_0:.*]] = memref.cast %[[Dim2Lvl_0]] : memref<2xindex> to memref<?xindex>
 // CHECK-DAG:     memref.store %[[TMP_c1]], %[[Dim2Lvl_0]][%[[TMP_c0]]] : memref<2xindex>
 // CHECK-DAG:     memref.store %[[TMP_c0]], %[[Dim2Lvl_0]][%[[TMP_c1]]] : memref<2xindex>
-// CHECK-DAG:     %[[NullPtr:.*]] = llvm.mlir.null : !llvm.ptr<i8>
+// CHECK-DAG:     %[[NullPtr:.*]] = llvm.mlir.zero : !llvm.ptr<i8>
 // CHECK:         %[[TMP_7:.*]] = call @newSparseTensor(%[[DimSizesP_0]], %[[LvlSizesP_0]], %[[LvlTypesP_0]], %[[Lvl2DimP_0]], %[[Dim2LvlP_0]], %[[TMP_c0_i32]], %[[TMP_c0_i32]], %[[TMP_c1_i32]], %[[TMP_c0_i32]], %[[NullPtr]])
 // CHECK:         %[[Values_r:.*]] = call @sparseValuesF64(%[[TMP_7]]) : (!llvm.ptr<i8>) -> memref<?xf64>
 // CHECK:         %[[Values:.*]] = memref.reshape %[[Values_r]]
diff --git a/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir b/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir
index 7a4989304b5be2d..e1fe5d85e72ec28 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_fill_zero.mlir
@@ -31,7 +31,7 @@
 // CHECK-DAG:       %[[IotaP:.*]] = memref.cast %[[Iota]] : memref<2xindex> to memref<?xindex>
 // CHECK-DAG:       memref.store %[[I0]], %[[Iota]]{{\[}}%[[I0]]] : memref<2xindex>
 // CHECK-DAG:       memref.store %[[I1]], %[[Iota]]{{\[}}%[[I1]]] : memref<2xindex>
-// CHECK-DAG:       %[[NullPtr:.*]] = llvm.mlir.null : !llvm.ptr<i8>
+// CHECK-DAG:       %[[NullPtr:.*]] = llvm.mlir.zero : !llvm.ptr<i8>
 // CHECK:           %[[VAL_19:.*]] = call @newSparseTensor(%[[DimSizesP]], %[[LvlSizesP]], %[[LvlTypesP]], %[[IotaP]], %[[IotaP]], %[[C0]], %[[C0]], %[[C1]], %[[C0]], %[[NullPtr]]) : (memref<?xindex>, memref<?xindex>, memref<?xi8>, memref<?xindex>, memref<?xindex>, i32, i32, i32, i32, !llvm.ptr<i8>) -> !llvm.ptr<i8>
 // CHECK:           %[[VAL_20:.*]] = memref.alloc() : memref<300xf64>
 // CHECK:           %[[VAL_21:.*]] = memref.cast %[[VAL_20]] : memref<300xf64> to memref<?xf64>
diff --git a/mlir/test/Target/LLVMIR/Import/constant.ll b/mlir/test/Target/LLVMIR/Import/constant.ll
index 85cde7b1bcfef67..620c1d3772d054d 100644
--- a/mlir/test/Target/LLVMIR/Import/constant.ll
+++ b/mlir/test/Target/LLVMIR/Import/constant.ll
@@ -49,7 +49,7 @@ define void @undef_constant(i32 %arg0) {
 
 ; CHECK-LABEL: @null_constant
 define ptr @null_constant() {
-  ; CHECK:  %[[NULL:[0-9]+]] = llvm.mlir.null : !llvm.ptr
+  ; CHECK:  %[[NULL:[0-9]+]] = llvm.mlir.zero : !llvm.ptr
   ; CHECK:  llvm.return %[[NULL]] : !llvm.ptr
   ret ptr null
 }
@@ -180,7 +180,7 @@ define i32 @function_address_after_def() {
 ; CHECK-DAG:  %[[CHAIN1:.+]] = llvm.insertvalue %[[C2]], %[[CHAIN0]][1]
 ; CHECK-DAG:  %[[CHAIN2:.+]] = llvm.insertvalue %[[C3]], %[[CHAIN1]][2]
 ; CHECK-DAG:  %[[CHAIN3:.+]] = llvm.insertvalue %[[C4]], %[[CHAIN2]][3]
-; CHECK-DAG:  %[[NULL:.+]] = llvm.mlir.null : !llvm.ptr
+; CHECK-DAG:  %[[NULL:.+]] = llvm.mlir.zero : !llvm.ptr
 ; CHECK-DAG:  %[[ROOT:.+]] = llvm.mlir.undef : !llvm.struct<"nested_agg_type", (struct<"simple_agg_type", (i32, i8, i16, i32)>, ptr)>
 ; CHECK-DAG:  %[[CHAIN4:.+]] = llvm.insertvalue %[[CHAIN3]], %[[ROOT]][0]
 ; CHECK-DAG:  %[[CHAIN5:.+]] = llvm.insertvalue %[[NULL]], %[[CHAIN4]][1]
@@ -188,7 +188,7 @@ define i32 @function_address_after_def() {
 %nested_agg_type = type {%simple_agg_type, ptr}
 @nested_agg = global %nested_agg_type { %simple_agg_type{i32 1, i8 2, i16 3, i32 4}, ptr null }
 
-; CHECK-DAG:  %[[NULL:.+]] = llvm.mlir.null : !llvm.ptr
+; CHECK-DAG:  %[[NULL:.+]] = llvm.mlir.zero : !llvm.ptr
 ; CHECK-DAG:  %[[ROOT:.+]] = llvm.mlir.undef : !llvm.vec<2 x ptr>
 ; CHECK-DAG:  %[[P0:.+]] = llvm.mlir.constant(0 : i32) : i32
 ; CHECK-DAG:  %[[CHAIN0:.+]] = llvm.insertelement %[[NULL]], %[[ROOT]][%[[P0]] : i32] : !llvm.vec<2 x ptr>
diff --git a/mlir/test/Target/LLVMIR/Import/exception.ll b/mlir/test/Target/LLVMIR/Import/exception.ll
index 944e5de6badd9d7..1bdfa6f3646e9bd 100644
--- a/mlir/test/Target/LLVMIR/Import/exception.ll
+++ b/mlir/test/Target/LLVMIR/Import/exception.ll
@@ -72,7 +72,7 @@ entry:
   ; CHECK: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32
   ; CHECK: %[[c1:.*]] = llvm.mlir.constant(1 : i32) : i32
   ; CHECK: %[[c2:.*]] = llvm.mlir.constant(2 : i32) : i32
-  ; CHECK: %[[c20:.*]] = llvm.mlir.constant(20 : i32) : i32  
+  ; CHECK: %[[c20:.*]] = llvm.mlir.constant(20 : i32) : i32
   ; CHECK: llvm.cond_br %[[cond]], ^[[bb1:.*]], ^[[bb2:.*]]
   br i1 %cond, label %call, label %nocall
 ; CHECK: ^[[bb1]]:
@@ -111,7 +111,7 @@ declare void @f2({ptr, i32})
 ; CHECK-LABEL: @landingpad_dominance
 define void @landingpad_dominance() personality ptr @__gxx_personality_v0 {
 entry:
-  ; CHECK:    %[[null:.*]] = llvm.mlir.null : !llvm.ptr
+  ; CHECK:    %[[null:.*]] = llvm.mlir.zero : !llvm.ptr
   ; CHECK:    %[[c1:.*]] = llvm.mlir.constant(0 : i32) : i32
   ; CHECK:    %[[undef:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, i32)>
   ; CHECK:    %[[tmpstruct:.*]] = llvm.insertvalue %[[null]], %[[undef]][0] : !llvm.struct<(ptr, i32)>
diff --git a/mlir/test/Target/LLVMIR/Import/instructions.ll b/mlir/test/Target/LLVMIR/Import/instructions.ll
index 3f5ade4f1573579..b72e72c8392d910 100644
--- a/mlir/test/Target/LLVMIR/Import/instructions.ll
+++ b/mlir/test/Target/LLVMIR/Import/instructions.ll
@@ -182,7 +182,7 @@ define void @integer_extension_and_truncation(i32 %arg1) {
 ; CHECK-SAME:  %[[ARG1:[a-zA-Z0-9]+]]
 ; CHECK-SAME:  %[[ARG2:[a-zA-Z0-9]+]]
 define ptr @pointer_casts(ptr %arg1, i64 %arg2) {
-  ; CHECK:  %[[NULL:[0-9]+]] = llvm.mlir.null : !llvm.ptr
+  ; CHECK:  %[[NULL:[0-9]+]] = llvm.mlir.zero : !llvm.ptr
   ; CHECK:  llvm.ptrtoint %[[ARG1]] : !llvm.ptr to i64
   ; CHECK:  llvm.inttoptr %[[ARG2]] : i64 to !llvm.ptr
   ; CHECK:  llvm.bitcast %[[ARG1]] : !llvm.ptr to !llvm.ptr
diff --git a/mlir/test/Target/LLVMIR/Import/zeroinitializer.ll b/mlir/test/Target/LLVMIR/Import/zeroinitializer.ll
index 41933dcbe7cb183..da3eae3e9e33771 100644
--- a/mlir/test/Target/LLVMIR/Import/zeroinitializer.ll
+++ b/mlir/test/Target/LLVMIR/Import/zeroinitializer.ll
@@ -4,7 +4,7 @@
 
 ; CHECK: llvm.mlir.global external @D()
 ; CHECK-SAME: !llvm.struct<"Domain", (ptr, ptr)>
-; CHECK: %[[E0:.+]] = llvm.mlir.null : !llvm.ptr
+; CHECK: %[[E0:.+]] = llvm.mlir.zero : !llvm.ptr
 ; CHECK: %[[ROOT:.+]] = llvm.mlir.undef : !llvm.struct<"Domain", (ptr, ptr)>
 ; CHECK: %[[CHAIN:.+]] = llvm.insertvalue %[[E0]], %[[ROOT]][0]
 ; CHECK: %[[RES:.+]] = llvm.insertvalue %[[E0]], %[[CHAIN]][1]
diff --git a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
index 6bbd761b6e613b5..d23991b65523fcf 100644
--- a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
@@ -640,14 +640,14 @@ llvm.func @ushl_sat_test(%arg0: i32, %arg1: i32, %arg2: vector<8xi32>, %arg3: ve
 // CHECK-LABEL: @coro_id
 llvm.func @coro_id(%arg0: i32, %arg1: !llvm.ptr) {
   // CHECK: call token @llvm.coro.id
-  %null = llvm.mlir.null : !llvm.ptr
+  %null = llvm.mlir.zero : !llvm.ptr
   llvm.intr.coro.id %arg0, %arg1, %arg1, %null : (i32, !llvm.ptr, !llvm.ptr, !llvm.ptr) -> !llvm.token
   llvm.return
 }
 
 // CHECK-LABEL: @coro_begin
 llvm.func @coro_begin(%arg0: i32, %arg1: !llvm.ptr) {
-  %null = llvm.mlir.null : !llvm.ptr
+  %null = llvm.mlir.zero : !llvm.ptr
   %token = llvm.intr.coro.id %arg0, %arg1, %arg1, %null : (i32, !llvm.ptr, !llvm.ptr, !llvm.ptr) -> !llvm.token
   // CHECK: call ptr @llvm.coro.begin
   llvm.intr.coro.begin %token, %arg1 : (!llvm.token, !llvm.ptr) -> !llvm.ptr
@@ -681,7 +681,7 @@ llvm.func @coro_save(%arg0: !llvm.ptr) {
 
 // CHECK-LABEL: @coro_suspend
 llvm.func @coro_suspend(%arg0: i32, %arg1 : i1, %arg2 : !llvm.ptr) {
-  %null = llvm.mlir.null : !llvm.ptr
+  %null = llvm.mlir.zero : !llvm.ptr
   %token = llvm.intr.coro.id %arg0, %arg2, %arg2, %null : (i32, !llvm.ptr, !llvm.ptr, !llvm.ptr) -> !llvm.token
   // CHECK: call i8 @llvm.coro.suspend
   %0 = llvm.intr.coro.suspend %token, %arg1 : i8
@@ -698,7 +698,7 @@ llvm.func @coro_end(%arg0: !llvm.ptr, %arg1 : i1) {
 
 // CHECK-LABEL: @coro_free
 llvm.func @coro_free(%arg0: i32, %arg1 : !llvm.ptr) {
-  %null = llvm.mlir.null : !llvm.ptr
+  %null = llvm.mlir.zero : !llvm.ptr
   %token = llvm.intr.coro.id %arg0, %arg1, %arg1, %null : (i32, !llvm.ptr, !llvm.ptr, !llvm.ptr) -> !llvm.token
   // CHECK: call ptr @llvm.coro.free
   %0 = llvm.intr.coro.free %token, %arg1 : (!llvm.token, !llvm.ptr) -> !llvm.ptr
diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir
index d08f5b2776ee1b7..8317fae7dd71099 100644
--- a/mlir/test/Target/LLVMIR/llvmir.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir.mlir
@@ -1438,7 +1438,7 @@ llvm.func @integer_extension_and_truncation(%a : i32) {
 // Check that the auxiliary `null` operation is converted into a `null` value.
 // CHECK-LABEL: @null
 llvm.func @null() -> !llvm.ptr<i32> {
-  %0 = llvm.mlir.null : !llvm.ptr<i32>
+  %0 = llvm.mlir.zero : !llvm.ptr<i32>
   // CHECK: ret ptr null
   llvm.return %0 : !llvm.ptr<i32>
 }
@@ -1536,7 +1536,7 @@ llvm.func @invokeLandingpad() -> i32 attributes { personality = @__gxx_personali
   %1 = llvm.mlir.constant(dense<0> : vector<1xi8>) : !llvm.array<1 x i8>
   %2 = llvm.mlir.addressof @_ZTIi : !llvm.ptr<ptr<i8>>
   %3 = llvm.bitcast %2 : !llvm.ptr<ptr<i8>> to !llvm.ptr<i8>
-  %4 = llvm.mlir.null : !llvm.ptr<ptr<i8>>
+  %4 = llvm.mlir.zero : !llvm.ptr<ptr<i8>>
   %5 = llvm.mlir.constant(1 : i32) : i32
   %6 = llvm.alloca %5 x i8 : (i32) -> !llvm.ptr<i8>
 // CHECK: invoke void @foo(ptr %[[a1]])
diff --git a/mlir/test/Target/LLVMIR/omptarget-declare-target-llvm-host.mlir b/mlir/test/Target/LLVMIR/omptarget-declare-target-llvm-host.mlir
index 709af26f1d3dfa4..82aff6f131ef1d4 100644
--- a/mlir/test/Target/LLVMIR/omptarget-declare-target-llvm-host.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-declare-target-llvm-host.mlir
@@ -1,31 +1,31 @@
 // RUN: mlir-translate -mlir-to-llvmir %s | FileCheck %s
 
-// CHECK-DAG: %struct.__tgt_offload_entry = type { ptr, ptr, i64, i32, i32 } 
+// CHECK-DAG: %struct.__tgt_offload_entry = type { ptr, ptr, i64, i32, i32 }
 // CHECK-DAG: !omp_offload.info = !{!{{.*}}}
 module attributes {llvm.target_triple = "x86_64-unknown-linux-gnu", omp.is_target_device = false} {
 
   // CHECK-DAG: @_QMtest_0Earray_1d = global [3 x i32] [i32 1, i32 2, i32 3]
-  // CHECK-DAG: @_QMtest_0Earray_1d_decl_tgt_ref_ptr = weak global ptr @_QMtest_0Earray_1d 
+  // CHECK-DAG: @_QMtest_0Earray_1d_decl_tgt_ref_ptr = weak global ptr @_QMtest_0Earray_1d
   // CHECK-DAG: @.omp_offloading.entry_name{{.*}} = internal unnamed_addr constant [36 x i8] c"_QMtest_0Earray_1d_decl_tgt_ref_ptr\00"
   // CHECK-DAG: @.omp_offloading.entry._QMtest_0Earray_1d_decl_tgt_ref_ptr = weak constant %struct.__tgt_offload_entry { ptr @_QMtest_0Earray_1d_decl_tgt_ref_ptr, ptr @.omp_offloading.entry_name{{.*}}, i64 8, i32 1, i32 0 }, section "omp_offloading_entries", align 1
   // CHECK-DAG: !{{.*}} = !{i32 {{.*}}, !"_QMtest_0Earray_1d_decl_tgt_ref_ptr", i32 {{.*}}, i32 {{.*}}}
   llvm.mlir.global external @_QMtest_0Earray_1d(dense<[1, 2, 3]> : tensor<3xi32>) {addr_space = 0 : i32, omp.declare_target = #omp.declaretarget<device_type = (any), capture_clause = (link)>} : !llvm.array<3 x i32>
 
   // CHECK-DAG: @_QMtest_0Earray_2d = global [2 x [2 x i32]] {{.*}}
-  // CHECK-DAG: @_QMtest_0Earray_2d_decl_tgt_ref_ptr = weak global ptr @_QMtest_0Earray_2d 
+  // CHECK-DAG: @_QMtest_0Earray_2d_decl_tgt_ref_ptr = weak global ptr @_QMtest_0Earray_2d
   // CHECK-DAG: @.omp_offloading.entry_name{{.*}} = internal unnamed_addr constant [36 x i8] c"_QMtest_0Earray_2d_decl_tgt_ref_ptr\00"
   // CHECK-DAG: @.omp_offloading.entry._QMtest_0Earray_2d_decl_tgt_ref_ptr = weak constant %struct.__tgt_offload_entry { ptr @_QMtest_0Earray_2d_decl_tgt_ref_ptr, ptr @.omp_offloading.entry_name{{.*}}, i64 8, i32 1, i32 0 }, section "omp_offloading_entries", align 1
   // CHECK-DAG: !{{.*}} = !{i32 {{.*}}, !"_QMtest_0Earray_2d_decl_tgt_ref_ptr", i32 {{.*}}, i32 {{.*}}}
   llvm.mlir.global external @_QMtest_0Earray_2d() {addr_space = 0 : i32, omp.declare_target = #omp.declaretarget<device_type = (any), capture_clause = (link)>} : !llvm.array<2 x array<2 x i32>> {
     %0 = llvm.mlir.undef : !llvm.array<2 x array<2 x i32>>
     %1 = llvm.mlir.constant(1 : i32) : i32
-    %2 = llvm.insertvalue %1, %0[0, 0] : !llvm.array<2 x array<2 x i32>> 
+    %2 = llvm.insertvalue %1, %0[0, 0] : !llvm.array<2 x array<2 x i32>>
     %3 = llvm.mlir.constant(2 : i32) : i32
-    %4 = llvm.insertvalue %3, %2[0, 1] : !llvm.array<2 x array<2 x i32>> 
+    %4 = llvm.insertvalue %3, %2[0, 1] : !llvm.array<2 x array<2 x i32>>
     %5 = llvm.mlir.constant(3 : i32) : i32
-    %6 = llvm.insertvalue %5, %4[1, 0] : !llvm.array<2 x array<2 x i32>> 
+    %6 = llvm.insertvalue %5, %4[1, 0] : !llvm.array<2 x array<2 x i32>>
     %7 = llvm.mlir.constant(4 : i32) : i32
-    %8 = llvm.insertvalue %7, %6[1, 1] : !llvm.array<2 x array<2 x i32>> 
+    %8 = llvm.insertvalue %7, %6[1, 1] : !llvm.array<2 x array<2 x i32>>
     %9 = llvm.mlir.constant(2 : index) : i64
     %10 = llvm.mlir.constant(2 : index) : i64
     llvm.return %8 : !llvm.array<2 x array<2 x i32>>
@@ -70,7 +70,7 @@ module attributes {llvm.target_triple = "x86_64-unknown-linux-gnu", omp.is_targe
   }
 
   // CHECK-DAG: @_QMtest_0Edata_int = global i32 1
-  // CHECK-DAG: @_QMtest_0Edata_int_decl_tgt_ref_ptr = weak global ptr @_QMtest_0Edata_int 
+  // CHECK-DAG: @_QMtest_0Edata_int_decl_tgt_ref_ptr = weak global ptr @_QMtest_0Edata_int
   // CHECK-DAG: @.omp_offloading.entry_name{{.*}} = internal unnamed_addr constant [36 x i8] c"_QMtest_0Edata_int_decl_tgt_ref_ptr\00"
   // CHECK-DAG: @.omp_offloading.entry._QMtest_0Edata_int_decl_tgt_ref_ptr = weak constant %struct.__tgt_offload_entry { ptr @_QMtest_0Edata_int_decl_tgt_ref_ptr, ptr @.omp_offloading.entry_name{{.*}}, i64 8, i32 1, i32 0 }, section "omp_offloading_entries", align 1
   // CHECK-DAG: !{{.*}} = !{i32 {{.*}}, !"_QMtest_0Edata_int_decl_tgt_ref_ptr", i32 {{.*}}, i32 {{.*}}}
@@ -103,28 +103,28 @@ module attributes {llvm.target_triple = "x86_64-unknown-linux-gnu", omp.is_targe
   // CHECK-DAG: @.omp_offloading.entry._QMtest_0Ept1_decl_tgt_ref_ptr = weak constant %struct.__tgt_offload_entry { ptr @_QMtest_0Ept1_decl_tgt_ref_ptr, ptr @.omp_offloading.entry_name{{.*}}, i64 8, i32 1, i32 0 }, section "omp_offloading_entries", align 1
   // CHECK-DAG: !{{.*}} = !{i32 {{.*}}, !"_QMtest_0Ept1_decl_tgt_ref_ptr", i32 {{.*}}, i32 {{.*}}}
   llvm.mlir.global external @_QMtest_0Ept1() {addr_space = 0 : i32, omp.declare_target = #omp.declaretarget<device_type = (any), capture_clause = (link)>} : !llvm.struct<(ptr<i32>, i64, i32, i8, i8, i8, i8)> {
-    %0 = llvm.mlir.null : !llvm.ptr<i32>
+    %0 = llvm.mlir.zero : !llvm.ptr<i32>
     %1 = llvm.mlir.constant(9 : i32) : i32
-    %2 = llvm.mlir.null : !llvm.ptr<i32>
+    %2 = llvm.mlir.zero : !llvm.ptr<i32>
     %3 = llvm.getelementptr %2[1] : (!llvm.ptr<i32>) -> !llvm.ptr<i32>
     %4 = llvm.ptrtoint %3 : !llvm.ptr<i32> to i64
     %5 = llvm.mlir.undef : !llvm.struct<(ptr<i32>, i64, i32, i8, i8, i8, i8)>
-    %6 = llvm.insertvalue %4, %5[1] : !llvm.struct<(ptr<i32>, i64, i32, i8, i8, i8, i8)> 
+    %6 = llvm.insertvalue %4, %5[1] : !llvm.struct<(ptr<i32>, i64, i32, i8, i8, i8, i8)>
     %7 = llvm.mlir.constant(20180515 : i32) : i32
-    %8 = llvm.insertvalue %7, %6[2] : !llvm.struct<(ptr<i32>, i64, i32, i8, i8, i8, i8)> 
+    %8 = llvm.insertvalue %7, %6[2] : !llvm.struct<(ptr<i32>, i64, i32, i8, i8, i8, i8)>
     %9 = llvm.mlir.constant(0 : i32) : i32
     %10 = llvm.trunc %9 : i32 to i8
-    %11 = llvm.insertvalue %10, %8[3] : !llvm.struct<(ptr<i32>, i64, i32, i8, i8, i8, i8)> 
+    %11 = llvm.insertvalue %10, %8[3] : !llvm.struct<(ptr<i32>, i64, i32, i8, i8, i8, i8)>
     %12 = llvm.trunc %1 : i32 to i8
-    %13 = llvm.insertvalue %12, %11[4] : !llvm.struct<(ptr<i32>, i64, i32, i8, i8, i8, i8)> 
+    %13 = llvm.insertvalue %12, %11[4] : !llvm.struct<(ptr<i32>, i64, i32, i8, i8, i8, i8)>
     %14 = llvm.mlir.constant(1 : i32) : i32
     %15 = llvm.trunc %14 : i32 to i8
-    %16 = llvm.insertvalue %15, %13[5] : !llvm.struct<(ptr<i32>, i64, i32, i8, i8, i8, i8)> 
+    %16 = llvm.insertvalue %15, %13[5] : !llvm.struct<(ptr<i32>, i64, i32, i8, i8, i8, i8)>
     %17 = llvm.mlir.constant(0 : i32) : i32
     %18 = llvm.trunc %17 : i32 to i8
-    %19 = llvm.insertvalue %18, %16[6] : !llvm.struct<(ptr<i32>, i64, i32, i8, i8, i8, i8)> 
+    %19 = llvm.insertvalue %18, %16[6] : !llvm.struct<(ptr<i32>, i64, i32, i8, i8, i8, i8)>
     %20 = llvm.bitcast %0 : !llvm.ptr<i32> to !llvm.ptr<i32>
-    %21 = llvm.insertvalue %20, %19[0] : !llvm.struct<(ptr<i32>, i64, i32, i8, i8, i8, i8)> 
+    %21 = llvm.insertvalue %20, %19[0] : !llvm.struct<(ptr<i32>, i64, i32, i8, i8, i8, i8)>
     llvm.return %21 : !llvm.struct<(ptr<i32>, i64, i32, i8, i8, i8, i8)>
   }
 
diff --git a/mlir/test/Transforms/test-convert-call-op.mlir b/mlir/test/Transforms/test-convert-call-op.mlir
index f8da5f91d3c8f0c..dd7924edc88b865 100644
--- a/mlir/test/Transforms/test-convert-call-op.mlir
+++ b/mlir/test/Transforms/test-convert-call-op.mlir
@@ -9,6 +9,6 @@ func.func @caller() -> i32 {
   %out = call @callee(%arg) : (!test.test_type) -> i32
   return %out : i32
 }
-// CHECK-NEXT: [[ARG:%.*]] = llvm.mlir.null : !llvm.ptr<i8>
+// CHECK-NEXT: [[ARG:%.*]] = llvm.mlir.zero : !llvm.ptr<i8>
 // CHECK-NEXT: [[OUT:%.*]] = llvm.call @callee([[ARG]])
 // CHECK-SAME:     : (!llvm.ptr<i8>) -> i32
diff --git a/mlir/test/lib/Conversion/FuncToLLVM/TestConvertCallOp.cpp b/mlir/test/lib/Conversion/FuncToLLVM/TestConvertCallOp.cpp
index c741a656d13f7db..9b3a1534a598eea 100644
--- a/mlir/test/lib/Conversion/FuncToLLVM/TestConvertCallOp.cpp
+++ b/mlir/test/lib/Conversion/FuncToLLVM/TestConvertCallOp.cpp
@@ -27,7 +27,7 @@ class TestTypeProducerOpConverter
   LogicalResult
   matchAndRewrite(test::TestTypeProducerOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    rewriter.replaceOpWithNewOp<LLVM::NullOp>(op, getVoidPtrType());
+    rewriter.replaceOpWithNewOp<LLVM::ZeroOp>(op, getVoidPtrType());
     return success();
   }
 };
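
For anyone updating out-of-tree IR or tests against this change, the rewrite is a pure respelling of the constant; a minimal sketch follows (the @use and @make_null symbols are illustrative, not taken from the patch):

  llvm.func @use(!llvm.ptr)

  llvm.func @make_null() {
    // Previously spelled: %p = llvm.mlir.null : !llvm.ptr
    // Now the zero value of the pointer type, i.e. a null pointer:
    %p = llvm.mlir.zero : !llvm.ptr
    llvm.call @use(%p) : (!llvm.ptr) -> ()
    llvm.return
  }

On the C++ side the builder change is equally mechanical, as the TestConvertCallOp.cpp hunk above shows: LLVM::NullOp becomes LLVM::ZeroOp with the same result type.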


