[clang] 7c6bfd9 - [WebAssembly] v128.load{8,16,32,64}_lane instructions

Thomas Lively via cfe-commits <cfe-commits at lists.llvm.org>
Thu Oct 15 08:33:20 PDT 2020


Author: Thomas Lively
Date: 2020-10-15T15:33:10Z
New Revision: 7c6bfd90ab2ddaa60de62878c8512db0645e8452

URL: https://github.com/llvm/llvm-project/commit/7c6bfd90ab2ddaa60de62878c8512db0645e8452
DIFF: https://github.com/llvm/llvm-project/commit/7c6bfd90ab2ddaa60de62878c8512db0645e8452.diff

LOG: [WebAssembly] v128.load{8,16,32,64}_lane instructions

Prototype the newly proposed load_lane instructions, as specified in
https://github.com/WebAssembly/simd/pull/350. Since these instructions are not
available to origin trial users on Chrome stable, make them opt-in by only
selecting them from intrinsics rather than normal ISel patterns. Since we only
need rough prototypes to measure performance right now, this commit does not
implement all the load and store patterns that would be necessary to make full
use of the offset immediate. However, the full suite of offset tests is included
to make it easy to track improvements in the future.
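
As a rough illustration of the opt-in path described above, the sketch below
shows how the new builtins are exercised from C. It is only a sketch: it
assumes a build with clang --target=wasm32 -msimd128, and the i8x16 typedef is
illustrative, mirroring the conventions used in
clang/test/CodeGen/builtins-wasm.c. Because no generic ISel patterns are added,
the instructions are only selected from the @llvm.wasm.*_lane intrinsics that
these builtins lower to.

  // Illustrative vector typedef; builtins-wasm.c defines an equivalent one.
  typedef signed char i8x16 __attribute__((vector_size(16)));

  // Lowers to v128.load8_lane: loads one byte from p and replaces lane 3 of v.
  // The lane index must be an integer constant expression.
  i8x16 load_byte_into_lane3(signed char *p, i8x16 v) {
    return __builtin_wasm_load8_lane(p, v, 3);
  }

  // Lowers to v128.store8_lane: stores lane 3 of v to the byte at p.
  void store_lane3_byte(signed char *p, i8x16 v) {
    __builtin_wasm_store8_lane(p, v, 3);
  }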

Since these are the first instructions to have a memarg immediate as well as an
additional immediate, the disassembler needed some additional hacks to be able
to parse them correctly. Making that code more principled is left as future
work.

Differential Revision: https://reviews.llvm.org/D89366

Added: 
    llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll

Modified: 
    clang/include/clang/Basic/BuiltinsWebAssembly.def
    clang/lib/CodeGen/CGBuiltin.cpp
    clang/test/CodeGen/builtins-wasm.c
    llvm/include/llvm/IR/IntrinsicsWebAssembly.td
    llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
    llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
    llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
    llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
    llvm/test/MC/WebAssembly/simd-encodings.s

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/BuiltinsWebAssembly.def b/clang/include/clang/Basic/BuiltinsWebAssembly.def
index 45e194c283b6..86348ff5fea7 100644
--- a/clang/include/clang/Basic/BuiltinsWebAssembly.def
+++ b/clang/include/clang/Basic/BuiltinsWebAssembly.def
@@ -171,8 +171,17 @@ TARGET_BUILTIN(__builtin_wasm_narrow_u_i8x16_i16x8, "V16UcV8UsV8Us", "nc", "simd
 TARGET_BUILTIN(__builtin_wasm_narrow_s_i16x8_i32x4, "V8sV4iV4i", "nc", "simd128")
 TARGET_BUILTIN(__builtin_wasm_narrow_u_i16x8_i32x4, "V8UsV4UiV4Ui", "nc", "simd128")
 
-TARGET_BUILTIN(__builtin_wasm_load32_zero, "V4ii*", "nU", "simd128")
-TARGET_BUILTIN(__builtin_wasm_load64_zero, "V2LLiLLi*", "nU", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load32_zero, "V4ii*", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load64_zero, "V2LLiLLi*", "n", "simd128")
+
+TARGET_BUILTIN(__builtin_wasm_load8_lane, "V16ScSc*V16ScIi", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load16_lane, "V8ss*V8sIi", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load32_lane, "V4ii*V4iIi", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_load64_lane, "V2LLiLLi*V2LLiIi", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_store8_lane, "vSc*V16ScIi", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_store16_lane, "vs*V8sIi", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_store32_lane, "vi*V4iIi", "n", "simd128")
+TARGET_BUILTIN(__builtin_wasm_store64_lane, "vLLi*V2LLiIi", "n", "simd128")
 
 #undef BUILTIN
 #undef TARGET_BUILTIN
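
A note on reading the prototype strings above: V16Sc is a vector of sixteen
signed chars; Sc*, s*, i*, and LLi* are pointers to signed char, short, int,
and long long; and the trailing Ii is an int argument whose I prefix requires
an integer constant expression, so the lane index has to be a compile-time
constant. The "n" attribute marks the builtins nothrow. Below is a hedged
sketch of what that constraint means in practice (function names are
illustrative, and the i8x16 typedef is the one sketched earlier):

  // Accepted: the lane index 1 is an integer constant expression.
  i8x16 lane_index_ok(signed char *p, i8x16 v) {
    return __builtin_wasm_load8_lane(p, v, 1);
  }

  // Diagnosed at compile time: a runtime lane index does not satisfy the
  // constant-integer (Ii) requirement in the prototype, so this does not build.
  // i8x16 lane_index_bad(signed char *p, i8x16 v, int lane) {
  //   return __builtin_wasm_load8_lane(p, v, lane);
  // }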

diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index 157dae2831f2..3f6977e16c4a 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -16711,6 +16711,52 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
     Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_load64_zero);
     return Builder.CreateCall(Callee, {Ptr});
   }
+  case WebAssembly::BI__builtin_wasm_load8_lane:
+  case WebAssembly::BI__builtin_wasm_load16_lane:
+  case WebAssembly::BI__builtin_wasm_load32_lane:
+  case WebAssembly::BI__builtin_wasm_load64_lane:
+  case WebAssembly::BI__builtin_wasm_store8_lane:
+  case WebAssembly::BI__builtin_wasm_store16_lane:
+  case WebAssembly::BI__builtin_wasm_store32_lane:
+  case WebAssembly::BI__builtin_wasm_store64_lane: {
+    Value *Ptr = EmitScalarExpr(E->getArg(0));
+    Value *Vec = EmitScalarExpr(E->getArg(1));
+    Optional<llvm::APSInt> LaneIdxConst =
+        E->getArg(2)->getIntegerConstantExpr(getContext());
+    assert(LaneIdxConst && "Constant arg isn't actually constant?");
+    Value *LaneIdx = llvm::ConstantInt::get(getLLVMContext(), *LaneIdxConst);
+    unsigned IntNo;
+    switch (BuiltinID) {
+    case WebAssembly::BI__builtin_wasm_load8_lane:
+      IntNo = Intrinsic::wasm_load8_lane;
+      break;
+    case WebAssembly::BI__builtin_wasm_load16_lane:
+      IntNo = Intrinsic::wasm_load16_lane;
+      break;
+    case WebAssembly::BI__builtin_wasm_load32_lane:
+      IntNo = Intrinsic::wasm_load32_lane;
+      break;
+    case WebAssembly::BI__builtin_wasm_load64_lane:
+      IntNo = Intrinsic::wasm_load64_lane;
+      break;
+    case WebAssembly::BI__builtin_wasm_store8_lane:
+      IntNo = Intrinsic::wasm_store8_lane;
+      break;
+    case WebAssembly::BI__builtin_wasm_store16_lane:
+      IntNo = Intrinsic::wasm_store16_lane;
+      break;
+    case WebAssembly::BI__builtin_wasm_store32_lane:
+      IntNo = Intrinsic::wasm_store32_lane;
+      break;
+    case WebAssembly::BI__builtin_wasm_store64_lane:
+      IntNo = Intrinsic::wasm_store64_lane;
+      break;
+    default:
+      llvm_unreachable("unexpected builtin ID");
+    }
+    Function *Callee = CGM.getIntrinsic(IntNo);
+    return Builder.CreateCall(Callee, {Ptr, Vec, LaneIdx});
+  }
   case WebAssembly::BI__builtin_wasm_shuffle_v8x16: {
     Value *Ops[18];
     size_t OpIdx = 0;

diff --git a/clang/test/CodeGen/builtins-wasm.c b/clang/test/CodeGen/builtins-wasm.c
index 85777491fc28..d7e998fb997b 100644
--- a/clang/test/CodeGen/builtins-wasm.c
+++ b/clang/test/CodeGen/builtins-wasm.c
@@ -284,6 +284,62 @@ f64x2 replace_lane_f64x2(f64x2 v, double x) {
   // WEBASSEMBLY-NEXT: ret
 }
 
+i8x16 load8_lane(signed char *p, i8x16 v) {
+  return __builtin_wasm_load8_lane(p, v, 0);
+  // WEBASSEMBLY: tail call <16 x i8> @llvm.wasm.load8.lane(
+  // WEBASSEMBLY-SAME: i8* %p, <16 x i8> %v, i32 0)
+  // WEBASSEMBLY-NEXT: ret
+}
+
+i16x8 load16_lane(short *p, i16x8 v) {
+  return __builtin_wasm_load16_lane(p, v, 0);
+  // WEBASSEMBLY: tail call <8 x i16> @llvm.wasm.load16.lane(
+  // WEBASSEMBLY-SAME: i16* %p, <8 x i16> %v, i32 0)
+  // WEBASSEMBLY-NEXT: ret
+}
+
+i32x4 load32_lane(int *p, i32x4 v) {
+  return __builtin_wasm_load32_lane(p, v, 0);
+  // WEBASSEMBLY: tail call <4 x i32> @llvm.wasm.load32.lane(
+  // WEBASSEMBLY-SAME: i32* %p, <4 x i32> %v, i32 0)
+  // WEBASSEMBLY-NEXT: ret
+}
+
+i64x2 load64_lane(long long *p, i64x2 v) {
+  return __builtin_wasm_load64_lane(p, v, 0);
+  // WEBASSEMBLY: tail call <2 x i64> @llvm.wasm.load64.lane(
+  // WEBASSEMBLY-SAME: i64* %p, <2 x i64> %v, i32 0)
+  // WEBASSEMBLY-NEXT: ret
+}
+
+void store8_lane(signed char *p, i8x16 v) {
+  __builtin_wasm_store8_lane(p, v, 0);
+  // WEBASSEMBLY: call void @llvm.wasm.store8.lane(
+  // WEBASSEMBLY-SAME: i8* %p, <16 x i8> %v, i32 0)
+  // WEBASSEMBLY-NEXT: ret
+}
+
+void store16_lane(short *p, i16x8 v) {
+  __builtin_wasm_store16_lane(p, v, 0);
+  // WEBASSEMBLY: call void @llvm.wasm.store16.lane(
+  // WEBASSEMBLY-SAME: i16* %p, <8 x i16> %v, i32 0)
+  // WEBASSEMBLY-NEXT: ret
+}
+
+void store32_lane(int *p, i32x4 v) {
+  __builtin_wasm_store32_lane(p, v, 0);
+  // WEBASSEMBLY: call void @llvm.wasm.store32.lane(
+  // WEBASSEMBLY-SAME: i32* %p, <4 x i32> %v, i32 0)
+  // WEBASSEMBLY-NEXT: ret
+}
+
+void store64_lane(long long *p, i64x2 v) {
+  __builtin_wasm_store64_lane(p, v, 0);
+  // WEBASSEMBLY: call void @llvm.wasm.store64.lane(
+  // WEBASSEMBLY-SAME: i64* %p, <2 x i64> %v, i32 0)
+  // WEBASSEMBLY-NEXT: ret
+}
+
 i8x16 add_saturate_s_i8x16(i8x16 x, i8x16 y) {
   return __builtin_wasm_add_saturate_s_i8x16(x, y);
   // WEBASSEMBLY: call <16 x i8> @llvm.sadd.sat.v16i8(

diff --git a/llvm/include/llvm/IR/IntrinsicsWebAssembly.td b/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
index a7d86e2d2ce6..8298312491fa 100644
--- a/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
+++ b/llvm/include/llvm/IR/IntrinsicsWebAssembly.td
@@ -208,6 +208,52 @@ def int_wasm_load64_zero :
             [IntrReadMem, IntrArgMemOnly],
              "", [SDNPMemOperand]>;
 
+// These intrinsics do not mark their lane index arguments as immediate because
+// that changes the corresponding SDNode from ISD::Constant to
+// ISD::TargetConstant, which would require extra complications in the ISel
+// tablegen patterns. TODO: Replace these intrinsic with normal ISel patterns
+// once the load_lane instructions are merged to the proposal.
+def int_wasm_load8_lane :
+  Intrinsic<[llvm_v16i8_ty],
+            [LLVMPointerType<llvm_i8_ty>, llvm_v16i8_ty, llvm_i32_ty],
+            [IntrReadMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_load16_lane :
+  Intrinsic<[llvm_v8i16_ty],
+            [LLVMPointerType<llvm_i16_ty>, llvm_v8i16_ty, llvm_i32_ty],
+            [IntrReadMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_load32_lane :
+  Intrinsic<[llvm_v4i32_ty],
+            [LLVMPointerType<llvm_i32_ty>, llvm_v4i32_ty, llvm_i32_ty],
+            [IntrReadMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_load64_lane :
+  Intrinsic<[llvm_v2i64_ty],
+            [LLVMPointerType<llvm_i64_ty>, llvm_v2i64_ty, llvm_i32_ty],
+            [IntrReadMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_store8_lane :
+  Intrinsic<[],
+            [LLVMPointerType<llvm_i8_ty>, llvm_v16i8_ty, llvm_i32_ty],
+            [IntrWriteMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_store16_lane :
+  Intrinsic<[],
+            [LLVMPointerType<llvm_i16_ty>, llvm_v8i16_ty, llvm_i32_ty],
+            [IntrWriteMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_store32_lane :
+  Intrinsic<[],
+            [LLVMPointerType<llvm_i32_ty>, llvm_v4i32_ty, llvm_i32_ty],
+            [IntrWriteMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+def int_wasm_store64_lane :
+  Intrinsic<[],
+            [LLVMPointerType<llvm_i64_ty>, llvm_v2i64_ty, llvm_i32_ty],
+            [IntrWriteMem, IntrArgMemOnly],
+            "", [SDNPMemOperand]>;
+
 //===----------------------------------------------------------------------===//
 // Thread-local storage intrinsics
 //===----------------------------------------------------------------------===//

diff --git a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
index 14908a630a0d..92e855972e8a 100644
--- a/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
+++ b/llvm/lib/Target/WebAssembly/AsmParser/WebAssemblyAsmParser.cpp
@@ -421,6 +421,12 @@ class WebAssemblyAsmParser final : public MCTargetAsmParser {
           return error("Expected integer constant");
         parseSingleInteger(false, Operands);
       } else {
+        // v128.{load,store}{8,16,32,64}_lane has both a memarg and a lane
+        // index. We need to avoid parsing an extra alignment operand for the
+        // lane index.
+        auto IsLoadStoreLane = InstName.find("_lane") != StringRef::npos;
+        if (IsLoadStoreLane && Operands.size() == 4)
+          return false;
         // Alignment not specified (or atomics, must use default alignment).
         // We can't just call WebAssembly::GetDefaultP2Align since we don't have
         // an opcode until after the assembly matcher, so set a default to fix

diff --git a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
index 631e96dd9246..d4f3a2853d20 100644
--- a/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
+++ b/llvm/lib/Target/WebAssembly/MCTargetDesc/WebAssemblyMCTargetDesc.h
@@ -177,7 +177,9 @@ inline unsigned GetDefaultP2AlignAny(unsigned Opc) {
   WASM_LOAD_STORE(ATOMIC_RMW8_U_CMPXCHG_I32)
   WASM_LOAD_STORE(ATOMIC_RMW8_U_CMPXCHG_I64)
   WASM_LOAD_STORE(LOAD_SPLAT_v8x16)
-    return 0;
+  WASM_LOAD_STORE(LOAD_LANE_v16i8)
+  WASM_LOAD_STORE(STORE_LANE_v16i8)
+  return 0;
   WASM_LOAD_STORE(LOAD16_S_I32)
   WASM_LOAD_STORE(LOAD16_U_I32)
   WASM_LOAD_STORE(LOAD16_S_I64)
@@ -203,7 +205,9 @@ inline unsigned GetDefaultP2AlignAny(unsigned Opc) {
   WASM_LOAD_STORE(ATOMIC_RMW16_U_CMPXCHG_I32)
   WASM_LOAD_STORE(ATOMIC_RMW16_U_CMPXCHG_I64)
   WASM_LOAD_STORE(LOAD_SPLAT_v16x8)
-    return 1;
+  WASM_LOAD_STORE(LOAD_LANE_v8i16)
+  WASM_LOAD_STORE(STORE_LANE_v8i16)
+  return 1;
   WASM_LOAD_STORE(LOAD_I32)
   WASM_LOAD_STORE(LOAD_F32)
   WASM_LOAD_STORE(STORE_I32)
@@ -233,7 +237,9 @@ inline unsigned GetDefaultP2AlignAny(unsigned Opc) {
   WASM_LOAD_STORE(ATOMIC_WAIT_I32)
   WASM_LOAD_STORE(LOAD_SPLAT_v32x4)
   WASM_LOAD_STORE(LOAD_ZERO_v4i32)
-    return 2;
+  WASM_LOAD_STORE(LOAD_LANE_v4i32)
+  WASM_LOAD_STORE(STORE_LANE_v4i32)
+  return 2;
   WASM_LOAD_STORE(LOAD_I64)
   WASM_LOAD_STORE(LOAD_F64)
   WASM_LOAD_STORE(STORE_I64)
@@ -256,7 +262,9 @@ inline unsigned GetDefaultP2AlignAny(unsigned Opc) {
   WASM_LOAD_STORE(LOAD_EXTEND_S_v2i64)
   WASM_LOAD_STORE(LOAD_EXTEND_U_v2i64)
   WASM_LOAD_STORE(LOAD_ZERO_v2i64)
-    return 3;
+  WASM_LOAD_STORE(LOAD_LANE_v2i64)
+  WASM_LOAD_STORE(STORE_LANE_v2i64)
+  return 3;
   WASM_LOAD_STORE(LOAD_V128)
   WASM_LOAD_STORE(STORE_V128)
     return 4;

diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 8a3f2f16c249..91a79b5985d3 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -685,6 +685,56 @@ bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.align = Info.memVT == MVT::i32 ? Align(4) : Align(8);
     Info.flags = MachineMemOperand::MOLoad;
     return true;
+  case Intrinsic::wasm_load8_lane:
+  case Intrinsic::wasm_load16_lane:
+  case Intrinsic::wasm_load32_lane:
+  case Intrinsic::wasm_load64_lane:
+  case Intrinsic::wasm_store8_lane:
+  case Intrinsic::wasm_store16_lane:
+  case Intrinsic::wasm_store32_lane:
+  case Intrinsic::wasm_store64_lane: {
+    MVT MemVT;
+    Align MemAlign;
+    switch (Intrinsic) {
+    case Intrinsic::wasm_load8_lane:
+    case Intrinsic::wasm_store8_lane:
+      MemVT = MVT::i8;
+      MemAlign = Align(1);
+      break;
+    case Intrinsic::wasm_load16_lane:
+    case Intrinsic::wasm_store16_lane:
+      MemVT = MVT::i16;
+      MemAlign = Align(2);
+      break;
+    case Intrinsic::wasm_load32_lane:
+    case Intrinsic::wasm_store32_lane:
+      MemVT = MVT::i32;
+      MemAlign = Align(4);
+      break;
+    case Intrinsic::wasm_load64_lane:
+    case Intrinsic::wasm_store64_lane:
+      MemVT = MVT::i64;
+      MemAlign = Align(8);
+      break;
+    default:
+      llvm_unreachable("unexpected intrinsic");
+    }
+    if (Intrinsic == Intrinsic::wasm_load8_lane ||
+        Intrinsic == Intrinsic::wasm_load16_lane ||
+        Intrinsic == Intrinsic::wasm_load32_lane ||
+        Intrinsic == Intrinsic::wasm_load64_lane) {
+      Info.opc = ISD::INTRINSIC_W_CHAIN;
+      Info.flags = MachineMemOperand::MOLoad;
+    } else {
+      Info.opc = ISD::INTRINSIC_VOID;
+      Info.flags = MachineMemOperand::MOStore;
+    }
+    Info.ptrVal = I.getArgOperand(0);
+    Info.memVT = MemVT;
+    Info.offset = 0;
+    Info.align = MemAlign;
+    return true;
+  }
   default:
     return false;
   }

diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
index 544eed544153..634f958d6ca7 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
@@ -53,7 +53,7 @@ defm LOAD_V128_A64 :
          "v128.load\t$off$p2align", 0>;
 }
 
-// Def load and store patterns from WebAssemblyInstrMemory.td for vector types
+// Def load patterns from WebAssemblyInstrMemory.td for vector types
 foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in {
 defm : LoadPatNoOffset<vec_t, load, "LOAD_V128">;
 defm : LoadPatImmOff<vec_t, load, regPlusImm, "LOAD_V128">;
@@ -201,6 +201,51 @@ defm : LoadPatOffsetOnly<v2i64, int_wasm_load64_zero, "LOAD_ZERO_v2i64">;
 defm : LoadPatGlobalAddrOffOnly<v4i32, int_wasm_load32_zero, "LOAD_ZERO_v4i32">;
 defm : LoadPatGlobalAddrOffOnly<v2i64, int_wasm_load64_zero, "LOAD_ZERO_v2i64">;
 
+// Load lane
+multiclass SIMDLoadLane<ValueType vec_t, string name, bits<32> simdop> {
+  let mayLoad = 1, UseNamedOperandTable = 1 in {
+  defm LOAD_LANE_#vec_t#_A32 :
+    SIMD_I<(outs V128:$dst),
+           (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx,
+                I32:$addr, V128:$vec),
+           (outs), (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx),
+           [], name#"\t$dst, ${off}(${addr})$p2align, $vec, $idx",
+           name#"\t$off$p2align, $idx", simdop>;
+  defm LOAD_LANE_#vec_t#_A64 :
+    SIMD_I<(outs V128:$dst),
+           (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx,
+                I64:$addr, V128:$vec),
+           (outs), (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx),
+           [], name#"\t$dst, ${off}(${addr})$p2align, $vec, $idx",
+           name#"\t$off$p2align, $idx", simdop>;
+  } // mayLoad = 1, UseNamedOperandTable = 1
+}
+
+// TODO: Also support v4f32 and v2f64 once the instructions are merged
+// to the proposal
+defm "" : SIMDLoadLane<v16i8, "v128.load8_lane", 88>;
+defm "" : SIMDLoadLane<v8i16, "v128.load16_lane", 89>;
+defm "" : SIMDLoadLane<v4i32, "v128.load32_lane", 90>;
+defm "" : SIMDLoadLane<v2i64, "v128.load64_lane", 91>;
+
+// Select loads with no constant offset.
+multiclass LoadLanePatNoOffset<ValueType ty, PatFrag kind, ImmLeaf lane_imm_t> {
+  def : Pat<(ty (kind (i32 I32:$addr), (ty V128:$vec), (i32 lane_imm_t:$idx))),
+            (!cast<NI>("LOAD_LANE_"#ty#"_A32") 0, 0, imm:$idx, I32:$addr, V128:$vec)>,
+        Requires<[HasAddr32]>;
+  def : Pat<(ty (kind (i64 I64:$addr), (ty V128:$vec), (i32 lane_imm_t:$idx))),
+            (!cast<NI>("LOAD_LANE_"#ty#"_A64") 0, 0, imm:$idx, I64:$addr, V128:$vec)>,
+        Requires<[HasAddr64]>;
+}
+
+defm : LoadLanePatNoOffset<v16i8, int_wasm_load8_lane, LaneIdx16>;
+defm : LoadLanePatNoOffset<v8i16, int_wasm_load16_lane, LaneIdx8>;
+defm : LoadLanePatNoOffset<v4i32, int_wasm_load32_lane, LaneIdx4>;
+defm : LoadLanePatNoOffset<v2i64, int_wasm_load64_lane, LaneIdx2>;
+
+// TODO: Also support the other load patterns for load_lane once the instructions
+// are merged to the proposal.
+
 // Store: v128.store
 let mayStore = 1, UseNamedOperandTable = 1 in {
 defm STORE_V128_A32 :
@@ -214,8 +259,9 @@ defm STORE_V128_A64 :
          "v128.store\t${off}(${addr})$p2align, $vec",
          "v128.store\t$off$p2align", 11>;
 }
+
+// Def store patterns from WebAssemblyInstrMemory.td for vector types
 foreach vec_t = [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64] in {
-// Def load and store patterns from WebAssemblyInstrMemory.td for vector types
 defm : StorePatNoOffset<vec_t, store, "STORE_V128">;
 defm : StorePatImmOff<vec_t, store, regPlusImm, "STORE_V128">;
 defm : StorePatImmOff<vec_t, store, or_is_add, "STORE_V128">;
@@ -223,6 +269,53 @@ defm : StorePatOffsetOnly<vec_t, store, "STORE_V128">;
 defm : StorePatGlobalAddrOffOnly<vec_t, store, "STORE_V128">;
 }
 
+// Store lane
+multiclass SIMDStoreLane<ValueType vec_t, string name, bits<32> simdop> {
+  let mayStore = 1, UseNamedOperandTable = 1 in {
+  defm STORE_LANE_#vec_t#_A32 :
+    SIMD_I<(outs),
+           (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx,
+                I32:$addr, V128:$vec),
+           (outs), (ins P2Align:$p2align, offset32_op:$off, vec_i8imm_op:$idx),
+           [], name#"\t${off}(${addr})$p2align, $vec, $idx",
+           name#"\t$off$p2align, $idx", simdop>;
+  defm STORE_LANE_#vec_t#_A64 :
+    SIMD_I<(outs V128:$dst),
+           (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx,
+                I64:$addr, V128:$vec),
+           (outs), (ins P2Align:$p2align, offset64_op:$off, vec_i8imm_op:$idx),
+           [], name#"\t${off}(${addr})$p2align, $vec, $idx",
+           name#"\t$off$p2align, $idx", simdop>;
+  } // mayStore = 1, UseNamedOperandTable = 1
+}
+
+// TODO: Also support v4f32 and v2f64 once the instructions are merged
+// to the proposal
+defm "" : SIMDStoreLane<v16i8, "v128.store8_lane", 92>;
+defm "" : SIMDStoreLane<v8i16, "v128.store16_lane", 93>;
+defm "" : SIMDStoreLane<v4i32, "v128.store32_lane", 94>;
+defm "" : SIMDStoreLane<v2i64, "v128.store64_lane", 95>;
+
+// Select stores with no constant offset.
+multiclass StoreLanePatNoOffset<ValueType ty, PatFrag kind, ImmLeaf lane_imm_t> {
+  def : Pat<(kind (i32 I32:$addr), (ty V128:$vec), (i32 lane_imm_t:$idx)),
+            (!cast<NI>("STORE_LANE_"#ty#"_A32")
+              0, 0, imm:$idx, I32:$addr, ty:$vec)>,
+        Requires<[HasAddr32]>;
+  def : Pat<(kind (i32 I64:$addr), (ty V128:$vec), (i32 lane_imm_t:$idx)),
+            (!cast<NI>("STORE_LANE_"#ty#"_A64")
+              0, 0, imm:$idx, I64:$addr, ty:$vec)>,
+        Requires<[HasAddr64]>;
+}
+
+defm : StoreLanePatNoOffset<v16i8, int_wasm_store8_lane, LaneIdx16>;
+defm : StoreLanePatNoOffset<v8i16, int_wasm_store16_lane, LaneIdx8>;
+defm : StoreLanePatNoOffset<v4i32, int_wasm_store32_lane, LaneIdx4>;
+defm : StoreLanePatNoOffset<v2i64, int_wasm_store64_lane, LaneIdx2>;
+
+// TODO: Also support the other store patterns for store_lane once the
+// instructions are merged to the proposal.
+
 //===----------------------------------------------------------------------===//
 // Constructing SIMD values
 //===----------------------------------------------------------------------===//

diff --git a/llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll b/llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll
new file mode 100644
index 000000000000..08c3f80e57bf
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll
@@ -0,0 +1,968 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -verify-machineinstrs -mattr=+simd128 | FileCheck %s
+
+; Test SIMD v128.load{8,16,32,64}_lane instructions.
+
+; TODO: Use the offset field by supporting more patterns. Right now only the
+; equivalents of LoadPatNoOffset/StorePatNoOffset are supported.
+
+target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
+target triple = "wasm32-unknown-unknown"
+
+declare <16 x i8> @llvm.wasm.load8.lane(i8*, <16 x i8>, i32)
+declare <8 x i16> @llvm.wasm.load16.lane(i16*, <8 x i16>, i32)
+declare <4 x i32> @llvm.wasm.load32.lane(i32*, <4 x i32>, i32)
+declare <2 x i64> @llvm.wasm.load64.lane(i64*, <2 x i64>, i32)
+
+declare void @llvm.wasm.store8.lane(i8*, <16 x i8>, i32)
+declare void @llvm.wasm.store16.lane(i16*, <8 x i16>, i32)
+declare void @llvm.wasm.store32.lane(i32*, <4 x i32>, i32)
+declare void @llvm.wasm.store64.lane(i64*, <2 x i64>, i32)
+
+;===----------------------------------------------------------------------------
+; v128.load8_lane / v128.store8_lane
+;===----------------------------------------------------------------------------
+
+define <16 x i8> @load_lane_i8_no_offset(i8* %p, <16 x i8> %v) {
+; CHECK-LABEL: load_lane_i8_no_offset:
+; CHECK:         .functype load_lane_i8_no_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %p, <16 x i8> %v, i32 0)
+  ret <16 x i8> %t
+}
+
+define <16 x i8> @load_lane_i8_with_folded_offset(i8* %p, <16 x i8> %v) {
+; CHECK-LABEL: load_lane_i8_with_folded_offset:
+; CHECK:         .functype load_lane_i8_with_folded_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i8* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i8*
+  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0)
+  ret <16 x i8> %t
+}
+
+define <16 x i8> @load_lane_i8_with_folded_gep_offset(i8* %p, <16 x i8> %v) {
+; CHECK-LABEL: load_lane_i8_with_folded_gep_offset:
+; CHECK:         .functype load_lane_i8_with_folded_gep_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 6
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i8, i8* %p, i32 6
+  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0)
+  ret <16 x i8> %t
+}
+
+define <16 x i8> @load_lane_i8_with_unfolded_gep_negative_offset(i8* %p, <16 x i8> %v) {
+; CHECK-LABEL: load_lane_i8_with_unfolded_gep_negative_offset:
+; CHECK:         .functype load_lane_i8_with_unfolded_gep_negative_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const -6
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i8, i8* %p, i32 -6
+  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0)
+  ret <16 x i8> %t
+}
+
+define <16 x i8> @load_lane_i8_with_unfolded_offset(i8* %p, <16 x i8> %v) {
+; CHECK-LABEL: load_lane_i8_with_unfolded_offset:
+; CHECK:         .functype load_lane_i8_with_unfolded_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i8* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i8*
+  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0)
+  ret <16 x i8> %t
+}
+
+define <16 x i8> @load_lane_i8_with_unfolded_gep_offset(i8* %p, <16 x i8> %v) {
+; CHECK-LABEL: load_lane_i8_with_unfolded_gep_offset:
+; CHECK:         .functype load_lane_i8_with_unfolded_gep_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 6
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr i8, i8* %p, i32 6
+  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0)
+  ret <16 x i8> %t
+}
+
+define <16 x i8> @load_lane_i8_from_numeric_address(<16 x i8> %v) {
+; CHECK-LABEL: load_lane_i8_from_numeric_address:
+; CHECK:         .functype load_lane_i8_from_numeric_address (v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 42
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.load8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = inttoptr i32 42 to i8*
+  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* %s, <16 x i8> %v, i32 0)
+  ret <16 x i8> %t
+}
+
+@gv_i8 = global i8 0
+define <16 x i8> @load_lane_i8_from_global_address(<16 x i8> %v) {
+; CHECK-LABEL: load_lane_i8_from_global_address:
+; CHECK:         .functype load_lane_i8_from_global_address (v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const gv_i8
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.load8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %t = tail call <16 x i8> @llvm.wasm.load8.lane(i8* @gv_i8, <16 x i8> %v, i32 0)
+  ret <16 x i8> %t
+}
+
+define void @store_lane_i8_no_offset(<16 x i8> %v, i8* %p) {
+; CHECK-LABEL: store_lane_i8_no_offset:
+; CHECK:         .functype store_lane_i8_no_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.store8.lane(i8* %p, <16 x i8> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i8_with_folded_offset(<16 x i8> %v, i8* %p) {
+; CHECK-LABEL: store_lane_i8_with_folded_offset:
+; CHECK:         .functype store_lane_i8_with_folded_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i8* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i8*
+  tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i8_with_folded_gep_offset(<16 x i8> %v, i8* %p) {
+; CHECK-LABEL: store_lane_i8_with_folded_gep_offset:
+; CHECK:         .functype store_lane_i8_with_folded_gep_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 6
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i8, i8* %p, i32 6
+  tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i8_with_unfolded_gep_negative_offset(<16 x i8> %v, i8* %p) {
+; CHECK-LABEL: store_lane_i8_with_unfolded_gep_negative_offset:
+; CHECK:         .functype store_lane_i8_with_unfolded_gep_negative_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const -6
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i8, i8* %p, i32 -6
+  tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i8_with_unfolded_offset(<16 x i8> %v, i8* %p) {
+; CHECK-LABEL: store_lane_i8_with_unfolded_offset:
+; CHECK:         .functype store_lane_i8_with_unfolded_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i8* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i8*
+  tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i8_with_unfolded_gep_offset(<16 x i8> %v, i8* %p) {
+; CHECK-LABEL: store_lane_i8_with_unfolded_gep_offset:
+; CHECK:         .functype store_lane_i8_with_unfolded_gep_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 6
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr i8, i8* %p, i32 6
+  tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i8_to_numeric_address(<16 x i8> %v) {
+; CHECK-LABEL: store_lane_i8_to_numeric_address:
+; CHECK:         .functype store_lane_i8_to_numeric_address (v128) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 42
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = inttoptr i32 42 to i8*
+  tail call void @llvm.wasm.store8.lane(i8* %s, <16 x i8> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i8_from_global_address(<16 x i8> %v) {
+; CHECK-LABEL: store_lane_i8_from_global_address:
+; CHECK:         .functype store_lane_i8_from_global_address (v128) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const gv_i8
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store8_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.store8.lane(i8* @gv_i8, <16 x i8> %v, i32 0)
+  ret void
+}
+
+;===----------------------------------------------------------------------------
+; v128.load16_lane / v128.store16_lane
+;===----------------------------------------------------------------------------
+
+define <8 x i16> @load_lane_i16_no_offset(i16* %p, <8 x i16> %v) {
+; CHECK-LABEL: load_lane_i16_no_offset:
+; CHECK:         .functype load_lane_i16_no_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %p, <8 x i16> %v, i32 0)
+  ret <8 x i16> %t
+}
+
+define <8 x i16> @load_lane_i16_with_folded_offset(i16* %p, <8 x i16> %v) {
+; CHECK-LABEL: load_lane_i16_with_folded_offset:
+; CHECK:         .functype load_lane_i16_with_folded_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i16* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i16*
+  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
+  ret <8 x i16> %t
+}
+
+define <8 x i16> @load_lane_i16_with_folded_gep_offset(i16* %p, <8 x i16> %v) {
+; CHECK-LABEL: load_lane_i16_with_folded_gep_offset:
+; CHECK:         .functype load_lane_i16_with_folded_gep_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 12
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i16, i16* %p, i32 6
+  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
+  ret <8 x i16> %t
+}
+
+define <8 x i16> @load_lane_i16_with_unfolded_gep_negative_offset(i16* %p, <8 x i16> %v) {
+; CHECK-LABEL: load_lane_i16_with_unfolded_gep_negative_offset:
+; CHECK:         .functype load_lane_i16_with_unfolded_gep_negative_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const -12
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i16, i16* %p, i32 -6
+  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
+  ret <8 x i16> %t
+}
+
+define <8 x i16> @load_lane_i16_with_unfolded_offset(i16* %p, <8 x i16> %v) {
+; CHECK-LABEL: load_lane_i16_with_unfolded_offset:
+; CHECK:         .functype load_lane_i16_with_unfolded_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i16* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i16*
+  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
+  ret <8 x i16> %t
+}
+
+define <8 x i16> @load_lane_i16_with_unfolded_gep_offset(i16* %p, <8 x i16> %v) {
+; CHECK-LABEL: load_lane_i16_with_unfolded_gep_offset:
+; CHECK:         .functype load_lane_i16_with_unfolded_gep_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 12
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr i16, i16* %p, i32 6
+  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
+  ret <8 x i16> %t
+}
+
+define <8 x i16> @load_lane_i16_from_numeric_address(<8 x i16> %v) {
+; CHECK-LABEL: load_lane_i16_from_numeric_address:
+; CHECK:         .functype load_lane_i16_from_numeric_address (v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 42
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = inttoptr i32 42 to i16*
+  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
+  ret <8 x i16> %t
+}
+
+@gv_i16 = global i16 0
+define <8 x i16> @load_lane_i16_from_global_address(<8 x i16> %v) {
+; CHECK-LABEL: load_lane_i16_from_global_address:
+; CHECK:         .functype load_lane_i16_from_global_address (v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const gv_i16
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* @gv_i16, <8 x i16> %v, i32 0)
+  ret <8 x i16> %t
+}
+
+define void @store_lane_i16_no_offset(<8 x i16> %v, i16* %p) {
+; CHECK-LABEL: store_lane_i16_no_offset:
+; CHECK:         .functype store_lane_i16_no_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.store16.lane(i16* %p, <8 x i16> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i16_with_folded_offset(<8 x i16> %v, i16* %p) {
+; CHECK-LABEL: store_lane_i16_with_folded_offset:
+; CHECK:         .functype store_lane_i16_with_folded_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i16* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i16*
+  tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i16_with_folded_gep_offset(<8 x i16> %v, i16* %p) {
+; CHECK-LABEL: store_lane_i16_with_folded_gep_offset:
+; CHECK:         .functype store_lane_i16_with_folded_gep_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 12
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i16, i16* %p, i32 6
+  tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i16_with_unfolded_gep_negative_offset(<8 x i16> %v, i16* %p) {
+; CHECK-LABEL: store_lane_i16_with_unfolded_gep_negative_offset:
+; CHECK:         .functype store_lane_i16_with_unfolded_gep_negative_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const -12
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i16, i16* %p, i32 -6
+  tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i16_with_unfolded_offset(<8 x i16> %v, i16* %p) {
+; CHECK-LABEL: store_lane_i16_with_unfolded_offset:
+; CHECK:         .functype store_lane_i16_with_unfolded_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i16* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i16*
+  tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i16_with_unfolded_gep_offset(<8 x i16> %v, i16* %p) {
+; CHECK-LABEL: store_lane_i16_with_unfolded_gep_offset:
+; CHECK:         .functype store_lane_i16_with_unfolded_gep_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 12
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr i16, i16* %p, i32 6
+  tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i16_to_numeric_address(<8 x i16> %v) {
+; CHECK-LABEL: store_lane_i16_to_numeric_address:
+; CHECK:         .functype store_lane_i16_to_numeric_address (v128) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 42
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = inttoptr i32 42 to i16*
+  tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i16_from_global_address(<8 x i16> %v) {
+; CHECK-LABEL: store_lane_i16_from_global_address:
+; CHECK:         .functype store_lane_i16_from_global_address (v128) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const gv_i16
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.store16.lane(i16* @gv_i16, <8 x i16> %v, i32 0)
+  ret void
+}
+
+;===----------------------------------------------------------------------------
+; v128.load32_lane / v128.store32_lane
+;===----------------------------------------------------------------------------
+
+define <4 x i32> @load_lane_i32_no_offset(i32* %p, <4 x i32> %v) {
+; CHECK-LABEL: load_lane_i32_no_offset:
+; CHECK:         .functype load_lane_i32_no_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %p, <4 x i32> %v, i32 0)
+  ret <4 x i32> %t
+}
+
+define <4 x i32> @load_lane_i32_with_folded_offset(i32* %p, <4 x i32> %v) {
+; CHECK-LABEL: load_lane_i32_with_folded_offset:
+; CHECK:         .functype load_lane_i32_with_folded_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i32* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i32*
+  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
+  ret <4 x i32> %t
+}
+
+define <4 x i32> @load_lane_i32_with_folded_gep_offset(i32* %p, <4 x i32> %v) {
+; CHECK-LABEL: load_lane_i32_with_folded_gep_offset:
+; CHECK:         .functype load_lane_i32_with_folded_gep_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i32, i32* %p, i32 6
+  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
+  ret <4 x i32> %t
+}
+
+define <4 x i32> @load_lane_i32_with_unfolded_gep_negative_offset(i32* %p, <4 x i32> %v) {
+; CHECK-LABEL: load_lane_i32_with_unfolded_gep_negative_offset:
+; CHECK:         .functype load_lane_i32_with_unfolded_gep_negative_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const -24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i32, i32* %p, i32 -6
+  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
+  ret <4 x i32> %t
+}
+
+define <4 x i32> @load_lane_i32_with_unfolded_offset(i32* %p, <4 x i32> %v) {
+; CHECK-LABEL: load_lane_i32_with_unfolded_offset:
+; CHECK:         .functype load_lane_i32_with_unfolded_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i32* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i32*
+  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
+  ret <4 x i32> %t
+}
+
+define <4 x i32> @load_lane_i32_with_unfolded_gep_offset(i32* %p, <4 x i32> %v) {
+; CHECK-LABEL: load_lane_i32_with_unfolded_gep_offset:
+; CHECK:         .functype load_lane_i32_with_unfolded_gep_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr i32, i32* %p, i32 6
+  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
+  ret <4 x i32> %t
+}
+
+define <4 x i32> @load_lane_i32_from_numeric_address(<4 x i32> %v) {
+; CHECK-LABEL: load_lane_i32_from_numeric_address:
+; CHECK:         .functype load_lane_i32_from_numeric_address (v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 42
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = inttoptr i32 42 to i32*
+  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
+  ret <4 x i32> %t
+}
+
+@gv_i32 = global i32 0
+define <4 x i32> @load_lane_i32_from_global_address(<4 x i32> %v) {
+; CHECK-LABEL: load_lane_i32_from_global_address:
+; CHECK:         .functype load_lane_i32_from_global_address (v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const gv_i32
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* @gv_i32, <4 x i32> %v, i32 0)
+  ret <4 x i32> %t
+}
+
+define void @store_lane_i32_no_offset(<4 x i32> %v, i32* %p) {
+; CHECK-LABEL: store_lane_i32_no_offset:
+; CHECK:         .functype store_lane_i32_no_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.store32.lane(i32* %p, <4 x i32> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i32_with_folded_offset(<4 x i32> %v, i32* %p) {
+; CHECK-LABEL: store_lane_i32_with_folded_offset:
+; CHECK:         .functype store_lane_i32_with_folded_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i32* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i32*
+  tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i32_with_folded_gep_offset(<4 x i32> %v, i32* %p) {
+; CHECK-LABEL: store_lane_i32_with_folded_gep_offset:
+; CHECK:         .functype store_lane_i32_with_folded_gep_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i32, i32* %p, i32 6
+  tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i32_with_unfolded_gep_negative_offset(<4 x i32> %v, i32* %p) {
+; CHECK-LABEL: store_lane_i32_with_unfolded_gep_negative_offset:
+; CHECK:         .functype store_lane_i32_with_unfolded_gep_negative_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const -24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i32, i32* %p, i32 -6
+  tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i32_with_unfolded_offset(<4 x i32> %v, i32* %p) {
+; CHECK-LABEL: store_lane_i32_with_unfolded_offset:
+; CHECK:         .functype store_lane_i32_with_unfolded_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i32* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i32*
+  tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i32_with_unfolded_gep_offset(<4 x i32> %v, i32* %p) {
+; CHECK-LABEL: store_lane_i32_with_unfolded_gep_offset:
+; CHECK:         .functype store_lane_i32_with_unfolded_gep_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr i32, i32* %p, i32 6
+  tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i32_to_numeric_address(<4 x i32> %v) {
+; CHECK-LABEL: store_lane_i32_to_numeric_address:
+; CHECK:         .functype store_lane_i32_to_numeric_address (v128) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 42
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = inttoptr i32 42 to i32*
+  tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i32_from_global_address(<4 x i32> %v) {
+; CHECK-LABEL: store_lane_i32_from_global_address:
+; CHECK:         .functype store_lane_i32_from_global_address (v128) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const gv_i32
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.store32.lane(i32* @gv_i32, <4 x i32> %v, i32 0)
+  ret void
+}
+
+;===----------------------------------------------------------------------------
+; v128.load64_lane / v128.store64_lane
+;===----------------------------------------------------------------------------
+
+define <2 x i64> @load_lane_i64_no_offset(i64* %p, <2 x i64> %v) {
+; CHECK-LABEL: load_lane_i64_no_offset:
+; CHECK:         .functype load_lane_i64_no_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %p, <2 x i64> %v, i32 0)
+  ret <2 x i64> %t
+}
+
+define <2 x i64> @load_lane_i64_with_folded_offset(i64* %p, <2 x i64> %v) {
+; CHECK-LABEL: load_lane_i64_with_folded_offset:
+; CHECK:         .functype load_lane_i64_with_folded_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i64* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i64*
+  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
+  ret <2 x i64> %t
+}
+
+define <2 x i64> @load_lane_i64_with_folded_gep_offset(i64* %p, <2 x i64> %v) {
+; CHECK-LABEL: load_lane_i64_with_folded_gep_offset:
+; CHECK:         .functype load_lane_i64_with_folded_gep_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 48
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i64, i64* %p, i32 6
+  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
+  ret <2 x i64> %t
+}
+
+define <2 x i64> @load_lane_i64_with_unfolded_gep_negative_offset(i64* %p, <2 x i64> %v) {
+; CHECK-LABEL: load_lane_i64_with_unfolded_gep_negative_offset:
+; CHECK:         .functype load_lane_i64_with_unfolded_gep_negative_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const -48
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i64, i64* %p, i32 -6
+  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
+  ret <2 x i64> %t
+}
+
+define <2 x i64> @load_lane_i64_with_unfolded_offset(i64* %p, <2 x i64> %v) {
+; CHECK-LABEL: load_lane_i64_with_unfolded_offset:
+; CHECK:         .functype load_lane_i64_with_unfolded_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i64* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i64*
+  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
+  ret <2 x i64> %t
+}
+
+define <2 x i64> @load_lane_i64_with_unfolded_gep_offset(i64* %p, <2 x i64> %v) {
+; CHECK-LABEL: load_lane_i64_with_unfolded_gep_offset:
+; CHECK:         .functype load_lane_i64_with_unfolded_gep_offset (i32, v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    i32.const 48
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr i64, i64* %p, i32 6
+  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
+  ret <2 x i64> %t
+}
+
+define <2 x i64> @load_lane_i64_from_numeric_address(<2 x i64> %v) {
+; CHECK-LABEL: load_lane_i64_from_numeric_address:
+; CHECK:         .functype load_lane_i64_from_numeric_address (v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 42
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = inttoptr i32 42 to i64*
+  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
+  ret <2 x i64> %t
+}
+
+@gv_i64 = global i64 0
+define <2 x i64> @load_lane_i64_from_global_address(<2 x i64> %v) {
+; CHECK-LABEL: load_lane_i64_from_global_address:
+; CHECK:         .functype load_lane_i64_from_global_address (v128) -> (v128)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const gv_i64
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* @gv_i64, <2 x i64> %v, i32 0)
+  ret <2 x i64> %t
+}
+
+define void @store_lane_i64_no_offset(<2 x i64> %v, i64* %p) {
+; CHECK-LABEL: store_lane_i64_no_offset:
+; CHECK:         .functype store_lane_i64_no_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.store64.lane(i64* %p, <2 x i64> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i64_with_folded_offset(<2 x i64> %v, i64* %p) {
+; CHECK-LABEL: store_lane_i64_with_folded_offset:
+; CHECK:         .functype store_lane_i64_with_folded_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i64* %p to i32
+  %r = add nuw i32 %q, 24
+  %s = inttoptr i32 %r to i64*
+  tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i64_with_folded_gep_offset(<2 x i64> %v, i64* %p) {
+; CHECK-LABEL: store_lane_i64_with_folded_gep_offset:
+; CHECK:         .functype store_lane_i64_with_folded_gep_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 48
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i64, i64* %p, i32 6
+  tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i64_with_unfolded_gep_negative_offset(<2 x i64> %v, i64* %p) {
+; CHECK-LABEL: store_lane_i64_with_unfolded_gep_negative_offset:
+; CHECK:         .functype store_lane_i64_with_unfolded_gep_negative_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const -48
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr inbounds i64, i64* %p, i32 -6
+  tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i64_with_unfolded_offset(<2 x i64> %v, i64* %p) {
+; CHECK-LABEL: store_lane_i64_with_unfolded_offset:
+; CHECK:         .functype store_lane_i64_with_unfolded_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 24
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %q = ptrtoint i64* %p to i32
+  %r = add nsw i32 %q, 24
+  %s = inttoptr i32 %r to i64*
+  tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i64_with_unfolded_gep_offset(<2 x i64> %v, i64* %p) {
+; CHECK-LABEL: store_lane_i64_with_unfolded_gep_offset:
+; CHECK:         .functype store_lane_i64_with_unfolded_gep_offset (v128, i32) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    local.get 1
+; CHECK-NEXT:    i32.const 48
+; CHECK-NEXT:    i32.add
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = getelementptr i64, i64* %p, i32 6
+  tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i64_to_numeric_address(<2 x i64> %v) {
+; CHECK-LABEL: store_lane_i64_to_numeric_address:
+; CHECK:         .functype store_lane_i64_to_numeric_address (v128) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 42
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  %s = inttoptr i32 42 to i64*
+  tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
+  ret void
+}
+
+define void @store_lane_i64_from_global_address(<2 x i64> %v) {
+; CHECK-LABEL: store_lane_i64_from_global_address:
+; CHECK:         .functype store_lane_i64_from_global_address (v128) -> ()
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const gv_i64
+; CHECK-NEXT:    local.get 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    # fallthrough-return
+  tail call void @llvm.wasm.store64.lane(i64* @gv_i64, <2 x i64> %v, i32 0)
+  ret void
+}

diff --git a/llvm/test/MC/WebAssembly/simd-encodings.s b/llvm/test/MC/WebAssembly/simd-encodings.s
index 57b42f2753f6..f18a4f196d68 100644
--- a/llvm/test/MC/WebAssembly/simd-encodings.s
+++ b/llvm/test/MC/WebAssembly/simd-encodings.s
@@ -280,6 +280,30 @@ main:
     # CHECK: v128.bitselect # encoding: [0xfd,0x52]
     v128.bitselect
 
+    # CHECK: v128.load8_lane 32, 1 # encoding: [0xfd,0x58,0x00,0x20,0x01]
+    v128.load8_lane 32, 1
+
+    # CHECK: v128.load16_lane 32, 1 # encoding: [0xfd,0x59,0x01,0x20,0x01]
+    v128.load16_lane 32, 1
+
+    # CHECK: v128.load32_lane 32, 1 # encoding: [0xfd,0x5a,0x02,0x20,0x01]
+    v128.load32_lane 32, 1
+
+    # CHECK: v128.load64_lane 32, 1 # encoding: [0xfd,0x5b,0x03,0x20,0x01]
+    v128.load64_lane 32, 1
+
+    # CHECK: v128.store8_lane 32, 1 # encoding: [0xfd,0x5c,0x00,0x20,0x01]
+    v128.store8_lane 32, 1
+
+    # CHECK: v128.store16_lane 32, 1 # encoding: [0xfd,0x5d,0x01,0x20,0x01]
+    v128.store16_lane 32, 1
+
+    # CHECK: v128.store32_lane 32, 1 # encoding: [0xfd,0x5e,0x02,0x20,0x01]
+    v128.store32_lane 32, 1
+
+    # CHECK: v128.store64_lane 32, 1 # encoding: [0xfd,0x5f,0x03,0x20,0x01]
+    v128.store64_lane 32, 1
+
     # CHECK: i8x16.abs # encoding: [0xfd,0x60]
     i8x16.abs
 


        

