[llvm] 497026c - [WebAssembly] Prototype prefetch instructions

Thomas Lively via llvm-commits llvm-commits@lists.llvm.org
Tue Jan 5 11:32:15 PST 2021


Author: Thomas Lively
Date: 2021-01-05T11:32:03-08:00
New Revision: 497026c90233e82ffd3ce2438c5f9567be6dabe7

URL: https://github.com/llvm/llvm-project/commit/497026c90233e82ffd3ce2438c5f9567be6dabe7
DIFF: https://github.com/llvm/llvm-project/commit/497026c90233e82ffd3ce2438c5f9567be6dabe7.diff

LOG: [WebAssembly] Prototype prefetch instructions

As proposed in https://github.com/WebAssembly/simd/pull/352 and using the
opcodes used in the V8 prototype:
https://chromium-review.googlesource.com/c/v8/v8/+/2543167. These instructions
are only usable via intrinsics and clang builtins to make them opt-in while they
are being benchmarked.

Differential Revision: https://reviews.llvm.org/D93883

Added: 
    llvm/test/CodeGen/WebAssembly/simd-prefetch-offset.ll

Modified: 
    clang/include/clang/Basic/BuiltinsWebAssembly.def
    clang/lib/CodeGen/CGBuiltin.cpp
    clang/test/CodeGen/builtins-wasm.c
    llvm/include/llvm/IR/IntrinsicsWebAssembly.td
    llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
    llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
    llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
    llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
    llvm/test/MC/WebAssembly/simd-encodings.s

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/BuiltinsWebAssembly.def b/clang/include/clang/Basic/BuiltinsWebAssembly.def
index d6860e0b13be..84482082095e 100644
--- a/clang/include/clang/Basic/BuiltinsWebAssembly.def
+++ b/clang/include/clang/Basic/BuiltinsWebAssembly.def
@@ -220,5 +220,8 @@ TARGET_BUILTIN(__builtin_wasm_store64_lane, "vLLi*V2LLiIi", "n", "simd128")
 
 TARGET_BUILTIN(__builtin_wasm_eq_i64x2, "V2LLiV2LLiV2LLi", "nc", "simd128")
 
+TARGET_BUILTIN(__builtin_wasm_prefetch_t, "vv*", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_prefetch_nt, "vv*", "n", "simd128")
+
 #undef BUILTIN
 #undef TARGET_BUILTIN

diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 6e98af407a9a..1e0337ca7ac3 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -17171,6 +17171,16 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
     Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_shuffle);
     return Builder.CreateCall(Callee, Ops);
   }
+  case WebAssembly::BI__builtin_wasm_prefetch_t: {
+    Value *Ptr = EmitScalarExpr(E->getArg(0));
+    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_prefetch_t);
+    return Builder.CreateCall(Callee, Ptr);
+  }
+  case WebAssembly::BI__builtin_wasm_prefetch_nt: {
+    Value *Ptr = EmitScalarExpr(E->getArg(0));
+    Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_prefetch_nt);
+    return Builder.CreateCall(Callee, Ptr);
+  }
   default:
     return nullptr;
   }

diff --git a/clang/test/CodeGen/builtins-wasm.c b/clang/test/CodeGen/builtins-wasm.c
index a07c278c33af..83924b48542e 100644
--- a/clang/test/CodeGen/builtins-wasm.c
+++ b/clang/test/CodeGen/builtins-wasm.c
@@ -1002,3 +1002,13 @@ i8x16 shuffle(i8x16 x, i8x16 y) {
   // WEBASSEMBLY-SAME: i32 15
   // WEBASSEMBLY-NEXT: ret
 }
+
+void prefetch_t(void *p) {
+  return __builtin_wasm_prefetch_t(p);
+  // WEBASSEMBLY: call void @llvm.wasm.prefetch.t(i8* %p)
+}
+
+void prefetch_nt(void *p) {
+  return __builtin_wasm_prefetch_nt(p);
+  // WEBASSEMBLY: call void @llvm.wasm.prefetch.nt(i8* %p)
+}

diff --git a/llvm/include/llvm/IR/IntrinsicsWebAssembly.td b/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
index d9a6aa78fdcd..e87700ab0fcb 100644
--- a/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
+++ b/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
@@ -311,6 +311,20 @@ def int_wasm_eq :
             [llvm_v2i64_ty, llvm_v2i64_ty],
             [IntrNoMem, IntrSpeculatable]>;
 
+// TODO: Remove this after experiments have been run. Use the target-agnostic
+// int_prefetch if this becomes specified at some point.
+def int_wasm_prefetch_t :
+  Intrinsic<[], [llvm_ptr_ty],
+            [IntrInaccessibleMemOrArgMemOnly, IntrWillReturn,
+             ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>],
+            "", [SDNPMemOperand]>;
+
+def int_wasm_prefetch_nt :
+  Intrinsic<[], [llvm_ptr_ty],
+            [IntrInaccessibleMemOrArgMemOnly, IntrWillReturn,
+             ReadOnly<ArgIndex<0>>, NoCapture<ArgIndex<0>>],
+            "", [SDNPMemOperand]>;
+
 //===----------------------------------------------------------------------===//
 // Thread-local storage intrinsics
 //===----------------------------------------------------------------------===//

diff --git a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
index 18d7b642e044..cd07a142147c 100644
--- a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
+++ b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
@@ -427,7 +427,8 @@ class WebAssemblyAsmParser final : public MCTargetAsmParser {
   bool checkForP2AlignIfLoadStore(OperandVector &Operands, StringRef InstName) {
     // FIXME: there is probably a cleaner way to do this.
     auto IsLoadStore = InstName.find(".load") != StringRef::npos ||
-                       InstName.find(".store") != StringRef::npos;
+                       InstName.find(".store") != StringRef::npos ||
+                       InstName.find("prefetch") != StringRef::npos;
     auto IsAtomic = InstName.find("atomic.") != StringRef::npos;
     if (IsLoadStore || IsAtomic) {
       // Parse load/store operands of the form: offset:p2align=align

diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
index 6c819f396ddc..518c8db1920f 100644
--- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
+++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
@@ -197,6 +197,8 @@ inline unsigned GetDefaultP2AlignAny(unsigned Opc) {
   WASM_LOAD_STORE(LOAD8_SPLAT)
   WASM_LOAD_STORE(LOAD_LANE_I8x16)
   WASM_LOAD_STORE(STORE_LANE_I8x16)
+  WASM_LOAD_STORE(PREFETCH_T)
+  WASM_LOAD_STORE(PREFETCH_NT)
   return 0;
   WASM_LOAD_STORE(LOAD16_S_I32)
   WASM_LOAD_STORE(LOAD16_U_I32)

diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 1abc610dac11..5c16f2b12d4f 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -756,6 +756,16 @@ bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.align = MemAlign;
     return true;
   }
+  case Intrinsic::wasm_prefetch_t:
+  case Intrinsic::wasm_prefetch_nt: {
+    Info.opc = ISD::INTRINSIC_VOID;
+    Info.memVT = MVT::i8;
+    Info.ptrVal = I.getArgOperand(0);
+    Info.offset = 0;
+    Info.align = Align(1);
+    Info.flags = MachineMemOperand::MOLoad;
+    return true;
+  }
   default:
     return false;
   }

diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
index 30e9fa21ba83..19851e1a786c 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
@@ -1285,3 +1285,43 @@ defm "" : SIMDQFM<F64x2, 254, 255>;
 
 defm Q15MULR_SAT_S :
   SIMDBinary<I16x8, int_wasm_q15mulr_saturate_signed, "q15mulr_sat_s", 156>;
+
+//===----------------------------------------------------------------------===//
+// Experimental prefetch instructions: prefetch.t, prefetch.nt
+//===----------------------------------------------------------------------===//
+
+let mayLoad = true, UseNamedOperandTable = true in {
+defm PREFETCH_T_A32 :
+  SIMD_I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr),
+         (outs), (ins P2Align:$p2align, offset32_op:$off), [],
+         "prefetch.t\t${off}(${addr})$p2align",
+         "prefetch.t\t$off$p2align", 0xc5>;
+defm PREFETCH_T_A64 :
+  SIMD_I<(outs), (ins P2Align:$p2align, offset64_op:$off, I64:$addr),
+         (outs), (ins P2Align:$p2align, offset64_op:$off), [],
+         "prefetch.t\t${off}(${addr})$p2align",
+         "prefetch.t\t$off$p2align", 0xc5>;
+defm PREFETCH_NT_A32 :
+  SIMD_I<(outs), (ins P2Align:$p2align, offset32_op:$off, I32:$addr),
+         (outs), (ins P2Align:$p2align, offset32_op:$off), [],
+         "prefetch.nt\t${off}(${addr})$p2align",
+         "prefetch.nt\t$off$p2align", 0xc6>;
+defm PREFETCH_NT_A64 :
+  SIMD_I<(outs), (ins P2Align:$p2align, offset64_op:$off, I64:$addr),
+         (outs), (ins P2Align:$p2align, offset64_op:$off), [],
+         "prefetch.nt\t${off}(${addr})$p2align",
+         "prefetch.nt\t$off$p2align", 0xc6>;
+} // mayLoad, UseNamedOperandTable
+
+multiclass PrefetchPatNoOffset<PatFrag kind, string inst> {
+  def : Pat<(kind I32:$addr), (!cast<NI>(inst # "_A32") 0, 0, $addr)>,
+        Requires<[HasAddr32]>;
+  def : Pat<(kind I64:$addr), (!cast<NI>(inst # "_A64") 0, 0, $addr)>,
+        Requires<[HasAddr64]>;
+}
+
+foreach inst = [["PREFETCH_T", "int_wasm_prefetch_t"],
+                ["PREFETCH_NT", "int_wasm_prefetch_nt"]] in {
+defvar node = !cast<Intrinsic>(inst[1]);
+defm : PrefetchPatNoOffset<node, inst[0]>;
+}

diff --git a/llvm/test/CodeGen/WebAssembly/simd-prefetch-offset.ll b/llvm/test/CodeGen/WebAssembly/simd-prefetch-offset.ll
new file mode 100644
index 000000000000..f3b54481c0e4
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/simd-prefetch-offset.ll
@@ -0,0 +1,235 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -verify-machineinstrs -mattr=+simd128 | FileCheck %s
+
+; Test experimental prefetch instructions
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+declare void @llvm.wasm.prefetch.t(i8*)
+declare void @llvm.wasm.prefetch.nt(i8*)
+@gv = global i8 0
+
+;===----------------------------------------------------------------------------
+; prefetch.t
+;===----------------------------------------------------------------------------
+
+define void @prefetch_t_no_offset(i8* %p) {
+; CHECK-LABEL: prefetch_t_no_offset:
+; CHECK:         .functype prefetch_t_no_offset (i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    prefetch.t 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.prefetch.t(i8* %p)
+  ret void
+}
+
+define void @prefetch_t_with_folded_offset(i8* %p) {
+; CHECK-LABEL: prefetch_t_with_folded_offset:
+; CHECK:         .functype prefetch_t_with_folded_offset (i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    prefetch.t 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i8* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i8*
+  tail call void @llvm.wasm.prefetch.t(i8* %s)
+  ret void
+}
+
+define void @prefetch_t_with_folded_gep_offset(i8* %p) {
+; CHECK-LABEL: prefetch_t_with_folded_gep_offset:
+; CHECK:         .functype prefetch_t_with_folded_gep_offset (i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 6
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    prefetch.t 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i8, i8* %p, i32 6
+  tail call void @llvm.wasm.prefetch.t(i8* %s)
+  ret void
+}
+
+define void @prefetch_t_with_unfolded_gep_negative_offset(i8* %p) {
+; CHECK-LABEL: prefetch_t_with_unfolded_gep_negative_offset:
+; CHECK:         .functype prefetch_t_with_unfolded_gep_negative_offset (i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const -6
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    prefetch.t 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i8, i8* %p, i32 -6
+  tail call void @llvm.wasm.prefetch.t(i8* %s)
+  ret void
+}
+
+define void @prefetch_t_with_unfolded_offset(i8* %p) {
+; CHECK-LABEL: prefetch_t_with_unfolded_offset:
+; CHECK:         .functype prefetch_t_with_unfolded_offset (i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    prefetch.t 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i8* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i8*
+  tail call void @llvm.wasm.prefetch.t(i8* %s)
+  ret void
+}
+
+define void @prefetch_t_with_unfolded_gep_offset(i8* %p) {
+; CHECK-LABEL: prefetch_t_with_unfolded_gep_offset:
+; CHECK:         .functype prefetch_t_with_unfolded_gep_offset (i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 6
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    prefetch.t 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr i8, i8* %p, i32 6
+  tail call void @llvm.wasm.prefetch.t(i8* %s)
+  ret void
+}
+
+define void @prefetch_t_from_numeric_address() {
+; CHECK-LABEL: prefetch_t_from_numeric_address:
+; CHECK:         .functype prefetch_t_from_numeric_address () -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 42
+; CHECK-NEXT:    prefetch.t 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = inttoptr i32 42 to i8*
+  tail call void @llvm.wasm.prefetch.t(i8* %s)
+  ret void
+}
+
+define void @prefetch_t_from_global_address() {
+; CHECK-LABEL: prefetch_t_from_global_address:
+; CHECK:         .functype prefetch_t_from_global_address () -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const gv
+; CHECK-NEXT:    prefetch.t 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.prefetch.t(i8* @gv)
+  ret void
+}
+
+;===----------------------------------------------------------------------------
+; prefetch.nt
+;===----------------------------------------------------------------------------
+
+define void @prefetch_nt_no_offset(i8* %p) {
+; CHECK-LABEL: prefetch_nt_no_offset:
+; CHECK:         .functype prefetch_nt_no_offset (i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    prefetch.nt 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.prefetch.nt(i8* %p)
+  ret void
+}
+
+define void @prefetch_nt_with_folded_offset(i8* %p) {
+; CHECK-LABEL: prefetch_nt_with_folded_offset:
+; CHECK:         .functype prefetch_nt_with_folded_offset (i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    prefetch.nt 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i8* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i8*
+  tail call void @llvm.wasm.prefetch.nt(i8* %s)
+  ret void
+}
+
+define void @prefetch_nt_with_folded_gep_offset(i8* %p) {
+; CHECK-LABEL: prefetch_nt_with_folded_gep_offset:
+; CHECK:         .functype prefetch_nt_with_folded_gep_offset (i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 6
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    prefetch.nt 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i8, i8* %p, i64 6
+  tail call void @llvm.wasm.prefetch.nt(i8* %s)
+  ret void
+}
+
+define void @prefetch_nt_with_unfolded_gep_negative_offset(i8* %p) {
+; CHECK-LABEL: prefetch_nt_with_unfolded_gep_negative_offset:
+; CHECK:         .functype prefetch_nt_with_unfolded_gep_negative_offset (i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const -6
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    prefetch.nt 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i8, i8* %p, i64 -6
+  tail call void @llvm.wasm.prefetch.nt(i8* %s)
+  ret void
+}
+
+define void @prefetch_nt_with_unfolded_offset(i8* %p) {
+; CHECK-LABEL: prefetch_nt_with_unfolded_offset:
+; CHECK:         .functype prefetch_nt_with_unfolded_offset (i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    prefetch.nt 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i8* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i8*
+  tail call void @llvm.wasm.prefetch.nt(i8* %s)
+  ret void
+}
+
+define void @prefetch_nt_with_unfolded_gep_offset(i8* %p) {
+; CHECK-LABEL: prefetch_nt_with_unfolded_gep_offset:
+; CHECK:         .functype prefetch_nt_with_unfolded_gep_offset (i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 6
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    prefetch.nt 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr i8, i8* %p, i64 6
+  tail call void @llvm.wasm.prefetch.nt(i8* %s)
+  ret void
+}
+
+define void @prefetch_nt_from_numeric_address() {
+; CHECK-LABEL: prefetch_nt_from_numeric_address:
+; CHECK:         .functype prefetch_nt_from_numeric_address () -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 42
+; CHECK-NEXT:    prefetch.nt 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = inttoptr i32 42 to i8*
+  tail call void @llvm.wasm.prefetch.nt(i8* %s)
+  ret void
+}
+
+define void @prefetch_nt_from_global_address() {
+; CHECK-LABEL: prefetch_nt_from_global_address:
+; CHECK:         .functype prefetch_nt_from_global_address () -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const gv
+; CHECK-NEXT:    prefetch.nt 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.prefetch.nt(i8* @gv)
+  ret void
+}

diff --git a/llvm/test/MC/WebAssembly/simd-encodings.s b/llvm/test/MC/WebAssembly/simd-encodings.s
index ab3ce59f25df..8a595a02e94f 100644
--- a/llvm/test/MC/WebAssembly/simd-encodings.s
+++ b/llvm/test/MC/WebAssembly/simd-encodings.s
@@ -736,4 +736,10 @@ main:
     # CHECK: i32x4.extadd_pairwise_i16x8_u # encoding: [0xfd,0xa6,0x01]
     i32x4.extadd_pairwise_i16x8_u
 
+    # CHECK: prefetch.t 16 # encoding: [0xfd,0xc5,0x01,0x00,0x10]
+    prefetch.t 16
+
+    # CHECK: prefetch.nt 16 # encoding: [0xfd,0xc6,0x01,0x00,0x10]
+    prefetch.nt 16
+
     end_function


        


More information about the llvm-commits mailing list