[Mlir-commits] [mlir] Add non-temporal support for LLVM masked loads (PR #104598)

Giuseppe Rossini llvmlistbot at llvm.org
Sat Aug 17 00:28:42 PDT 2024


https://github.com/giuseros updated https://github.com/llvm/llvm-project/pull/104598

From 1c201daa98265888b576407f22a0e26e5d746cdc Mon Sep 17 00:00:00 2001
From: Giuseppe Rossini <giuseppe.rossini at amd.com>
Date: Fri, 16 Aug 2024 15:42:24 +0100
Subject: [PATCH 1/3] Add non-temporal support for LLVM masked loads

---
 .../mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td        | 14 +++++++++-----
 mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir     |  5 ++++-
 2 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
index 2e1635e590cad6..22803c8ee59faa 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
@@ -842,22 +842,26 @@ def LLVM_GetActiveLaneMaskOp
 /// Create a call to Masked Load intrinsic.
 def LLVM_MaskedLoadOp : LLVM_OneResultIntrOp<"masked.load"> {
   let arguments = (ins LLVM_AnyPointer:$data, LLVM_VectorOf<I1>:$mask,
-                   Variadic<LLVM_AnyVector>:$pass_thru, I32Attr:$alignment);
+                   Variadic<LLVM_AnyVector>:$pass_thru, I32Attr:$alignment,
+                   DefaultValuedAttr<BoolAttr, "false">:$nontemporal);
   let results = (outs LLVM_AnyVector:$res);
   let assemblyFormat =
     "operands attr-dict `:` functional-type(operands, results)";
 
   string llvmBuilder = [{
-    $res = $pass_thru.empty() ? builder.CreateMaskedLoad(
+    auto *inst = $pass_thru.empty() ? builder.CreateMaskedLoad(
         $_resultType, $data, llvm::Align($alignment), $mask) :
       builder.CreateMaskedLoad(
         $_resultType, $data, llvm::Align($alignment), $mask, $pass_thru[0]);
-  }];
+    $res = inst;
+  }] #setNonTemporalMetadataCode;
   string mlirBuilder = [{
+    auto *intrinInst = dyn_cast<llvm::IntrinsicInst>(inst);
     $res = $_builder.create<LLVM::MaskedLoadOp>($_location,
-      $_resultType, $data, $mask, $pass_thru, $_int_attr($alignment));
+      $_resultType, $data, $mask, $pass_thru, $_int_attr($alignment),
+        mlir::BoolAttr::get($_op->getContext(), intrinInst->hasMetadata(llvm::LLVMContext::MD_nontemporal)));
   }];
-  list<int> llvmArgIndices = [0, 2, 3, 1];
+  list<int> llvmArgIndices = [0, 2, 3, 1, -1];
 }
 
 /// Create a call to Masked Store intrinsic.
diff --git a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
index 7878aa5ee46d4f..14af06fa93992b 100644
--- a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
@@ -417,8 +417,11 @@ llvm.func @masked_load_store_intrinsics(%A: !llvm.ptr, %mask: vector<7xi1>) {
   // CHECK: call <7 x float> @llvm.masked.load.v7f32.p0(ptr %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> poison)
   %a = llvm.intr.masked.load %A, %mask { alignment = 1: i32} :
     (!llvm.ptr, vector<7xi1>) -> vector<7xf32>
+  // CHECK: call <7 x float> @llvm.masked.load.v7f32.p0(ptr %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> poison), !nontemporal !1
+  %b = llvm.intr.masked.load %A, %mask { alignment = 1: i32, nontemporal=1 :i1} :
+    (!llvm.ptr, vector<7xi1>) -> vector<7xf32>
   // CHECK: call <7 x float> @llvm.masked.load.v7f32.p0(ptr %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
-  %b = llvm.intr.masked.load %A, %mask, %a { alignment = 1: i32} :
+  %c = llvm.intr.masked.load %A, %mask, %a { alignment = 1: i32} :
     (!llvm.ptr, vector<7xi1>, vector<7xf32>) -> vector<7xf32>
   // CHECK: call void @llvm.masked.store.v7f32.p0(<7 x float> %{{.*}}, ptr %0, i32 {{.*}}, <7 x i1> %{{.*}})
   llvm.intr.masked.store %b, %A, %mask { alignment = 1: i32} :

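A note on what this first patch provides at the MLIR level: the new boolean `nontemporal` attribute on `llvm.intr.masked.load` is forwarded by the `llvmBuilder` string (which appends `setNonTemporalMetadataCode`), so when the attribute is set the generated `llvm.masked.load` call should come out tagged with `!nontemporal` metadata, as the CHECK line in the test expects. A minimal sketch of the intended use, mirroring that test (the function name is illustrative, and `nontemporal = true` is just the spelled-out form of the `1 : i1` used in the test); the lowering can be exercised with `mlir-translate -mlir-to-llvmir`, as the test file does:

  llvm.func @masked_load_nt(%ptr: !llvm.ptr, %mask: vector<7xi1>) -> vector<7xf32> {
    // With the attribute set, the translation is expected to emit a masked.load
    // call carrying `!nontemporal` metadata; without it, a plain masked.load.
    %v = llvm.intr.masked.load %ptr, %mask { alignment = 4 : i32, nontemporal = true } :
        (!llvm.ptr, vector<7xi1>) -> vector<7xf32>
    llvm.return %v : vector<7xf32>
  }
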
From 3bb609a12368d589286595389bbc3ba77b240408 Mon Sep 17 00:00:00 2001
From: Giuseppe Rossini <giuseppe.rossini at amd.com>
Date: Fri, 16 Aug 2024 19:16:35 +0100
Subject: [PATCH 2/3] Address review feedback

---
 mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td | 2 +-
 mlir/test/Target/LLVMIR/Import/intrinsic.ll          | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
index 22803c8ee59faa..c65cc9ef2f110b 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
@@ -859,7 +859,7 @@ def LLVM_MaskedLoadOp : LLVM_OneResultIntrOp<"masked.load"> {
     auto *intrinInst = dyn_cast<llvm::IntrinsicInst>(inst);
     $res = $_builder.create<LLVM::MaskedLoadOp>($_location,
       $_resultType, $data, $mask, $pass_thru, $_int_attr($alignment),
-        mlir::BoolAttr::get($_op->getContext(), intrinInst->hasMetadata(llvm::LLVMContext::MD_nontemporal)));
+        mlir::BoolAttr::get($_builder.getContext(), intrinInst->hasMetadata(llvm::LLVMContext::MD_nontemporal)));
   }];
   list<int> llvmArgIndices = [0, 2, 3, 1, -1];
 }
diff --git a/mlir/test/Target/LLVMIR/Import/intrinsic.ll b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
index 9a5528002ef5e9..87da34560926eb 100644
--- a/mlir/test/Target/LLVMIR/Import/intrinsic.ll
+++ b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
@@ -409,6 +409,8 @@ define void @masked_load_store_intrinsics(ptr %vec, <7 x i1> %mask) {
   %1 = call <7 x float> @llvm.masked.load.v7f32.p0(ptr %vec, i32 1, <7 x i1> %mask, <7 x float> undef)
   ; CHECK:  %[[VAL2:.+]] = llvm.intr.masked.load %[[VEC]], %[[MASK]], %[[VAL1]] {alignment = 4 : i32}
   %2 = call <7 x float> @llvm.masked.load.v7f32.p0(ptr %vec, i32 4, <7 x i1> %mask, <7 x float> %1)
+  ; CHECK:  %[[VAL3:.+]] = llvm.intr.masked.load %[[VEC]], %[[MASK]], %[[VAL1]] {alignment = 4 : i32, nontemporal = true}
+  %3 = call <7 x float> @llvm.masked.load.v7f32.p0(ptr %vec, i32 4, <7 x i1> %mask, <7 x float> %1), !nontemporal !{i32 1}
   ; CHECK:  llvm.intr.masked.store %[[VAL2]], %[[VEC]], %[[MASK]] {alignment = 8 : i32}
   ; CHECK-SAME:  vector<7xf32>, vector<7xi1> into !llvm.ptr
   call void @llvm.masked.store.v7f32.p0(<7 x float> %2, ptr %vec, i32 8, <7 x i1> %mask)

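The import test added here exercises the opposite direction: when `mlir-translate -import-llvm` encounters a masked-load call carrying `!nontemporal !{i32 1}` metadata, the `mlirBuilder` string queries the instruction's metadata and sets the attribute accordingly. A sketch of what the importer is expected to produce at this point in the series (argument and value names are illustrative):

  llvm.func @imported_masked_load_nt(%vec: !llvm.ptr, %mask: vector<7xi1>, %passthru: vector<7xf32>) -> vector<7xf32> {
    // Round-tripped form of a masked.load call annotated with !nontemporal !{i32 1}.
    %0 = llvm.intr.masked.load %vec, %mask, %passthru { alignment = 4 : i32, nontemporal = true } :
        (!llvm.ptr, vector<7xi1>, vector<7xf32>) -> vector<7xf32>
    llvm.return %0 : vector<7xf32>
  }
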
From 8119fc890ab91b70d409699a027a93f708690092 Mon Sep 17 00:00:00 2001
From: Giuseppe Rossini <giuseppe.rossini at amd.com>
Date: Sat, 17 Aug 2024 08:28:24 +0100
Subject: [PATCH 3/3] Addressing review feedback - 2

---
 mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td | 5 +++--
 mlir/test/Target/LLVMIR/Import/intrinsic.ll          | 2 +-
 mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir       | 2 +-
 3 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
index c65cc9ef2f110b..86983e95fdd33d 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
@@ -843,7 +843,7 @@ def LLVM_GetActiveLaneMaskOp
 def LLVM_MaskedLoadOp : LLVM_OneResultIntrOp<"masked.load"> {
   let arguments = (ins LLVM_AnyPointer:$data, LLVM_VectorOf<I1>:$mask,
                    Variadic<LLVM_AnyVector>:$pass_thru, I32Attr:$alignment,
-                   DefaultValuedAttr<BoolAttr, "false">:$nontemporal);
+                   UnitAttr:$nontemporal);
   let results = (outs LLVM_AnyVector:$res);
   let assemblyFormat =
     "operands attr-dict `:` functional-type(operands, results)";
@@ -857,9 +857,10 @@ def LLVM_MaskedLoadOp : LLVM_OneResultIntrOp<"masked.load"> {
   }] #setNonTemporalMetadataCode;
   string mlirBuilder = [{
     auto *intrinInst = dyn_cast<llvm::IntrinsicInst>(inst);
+    bool nontemporal = intrinInst->hasMetadata(llvm::LLVMContext::MD_nontemporal);
     $res = $_builder.create<LLVM::MaskedLoadOp>($_location,
       $_resultType, $data, $mask, $pass_thru, $_int_attr($alignment),
-        mlir::BoolAttr::get($_builder.getContext(), intrinInst->hasMetadata(llvm::LLVMContext::MD_nontemporal)));
+        nontemporal ? $_builder.getUnitAttr() : nullptr);
   }];
   list<int> llvmArgIndices = [0, 2, 3, 1, -1];
 }
diff --git a/mlir/test/Target/LLVMIR/Import/intrinsic.ll b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
index 87da34560926eb..0fa82cef0a0f5a 100644
--- a/mlir/test/Target/LLVMIR/Import/intrinsic.ll
+++ b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
@@ -409,7 +409,7 @@ define void @masked_load_store_intrinsics(ptr %vec, <7 x i1> %mask) {
   %1 = call <7 x float> @llvm.masked.load.v7f32.p0(ptr %vec, i32 1, <7 x i1> %mask, <7 x float> undef)
   ; CHECK:  %[[VAL2:.+]] = llvm.intr.masked.load %[[VEC]], %[[MASK]], %[[VAL1]] {alignment = 4 : i32}
   %2 = call <7 x float> @llvm.masked.load.v7f32.p0(ptr %vec, i32 4, <7 x i1> %mask, <7 x float> %1)
-  ; CHECK:  %[[VAL3:.+]] = llvm.intr.masked.load %[[VEC]], %[[MASK]], %[[VAL1]] {alignment = 4 : i32, nontemporal = true}
+  ; CHECK:  %[[VAL3:.+]] = llvm.intr.masked.load %[[VEC]], %[[MASK]], %[[VAL1]] {alignment = 4 : i32, nontemporal}
   %3 = call <7 x float> @llvm.masked.load.v7f32.p0(ptr %vec, i32 4, <7 x i1> %mask, <7 x float> %1), !nontemporal !{i32 1}
   ; CHECK:  llvm.intr.masked.store %[[VAL2]], %[[VEC]], %[[MASK]] {alignment = 8 : i32}
   ; CHECK-SAME:  vector<7xf32>, vector<7xi1> into !llvm.ptr
diff --git a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
index 14af06fa93992b..e2eadf14fc97e9 100644
--- a/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-intrinsics.mlir
@@ -418,7 +418,7 @@ llvm.func @masked_load_store_intrinsics(%A: !llvm.ptr, %mask: vector<7xi1>) {
   %a = llvm.intr.masked.load %A, %mask { alignment = 1: i32} :
     (!llvm.ptr, vector<7xi1>) -> vector<7xf32>
   // CHECK: call <7 x float> @llvm.masked.load.v7f32.p0(ptr %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> poison), !nontemporal !1
-  %b = llvm.intr.masked.load %A, %mask { alignment = 1: i32, nontemporal=1 :i1} :
+  %b = llvm.intr.masked.load %A, %mask { alignment = 1: i32, nontemporal} :
     (!llvm.ptr, vector<7xi1>) -> vector<7xf32>
   // CHECK: call <7 x float> @llvm.masked.load.v7f32.p0(ptr %{{.*}}, i32 1, <7 x i1> %{{.*}}, <7 x float> %{{.*}})
   %c = llvm.intr.masked.load %A, %mask, %a { alignment = 1: i32} :

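With this last revision the hint becomes a `UnitAttr`, so it is simply present or absent rather than carrying a true/false value, and the importer only attaches it when the instruction actually carries the metadata. A minimal sketch of the final syntax (the function name is illustrative):

  llvm.func @masked_load_final(%ptr: !llvm.ptr, %mask: vector<7xi1>) -> vector<7xf32> {
    // Unit attribute present: expected to emit !nontemporal metadata on the call.
    %nt = llvm.intr.masked.load %ptr, %mask { alignment = 4 : i32, nontemporal } :
        (!llvm.ptr, vector<7xi1>) -> vector<7xf32>
    // Attribute absent: a plain masked.load with no metadata attached.
    %plain = llvm.intr.masked.load %ptr, %mask { alignment = 4 : i32 } :
        (!llvm.ptr, vector<7xi1>) -> vector<7xf32>
    llvm.return %nt : vector<7xf32>
  }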

