[llvm] b69374c - [WebAssembly] Legalize vector types by widening

Thomas Lively via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 19 12:07:40 PDT 2021


Author: Thomas Lively
Date: 2021-08-19T12:07:33-07:00
New Revision: b69374ca58d3e5e62688efd8149417499096d8a7

URL: https://github.com/llvm/llvm-project/commit/b69374ca58d3e5e62688efd8149417499096d8a7
DIFF: https://github.com/llvm/llvm-project/commit/b69374ca58d3e5e62688efd8149417499096d8a7.diff

LOG: [WebAssembly] Legalize vector types by widening

The default legalization of unsupported vector types is to promote the integers
in each lane, which leads to extra sign or zero extending and masking when
moving data into and out of vectors. Switch our preferred type legalization from
the default to vector widening, which keeps the data in the low lanes of the
vector rather than in the low bits of each lane. The unused high lanes can be
ignored.
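
For illustration, here is a minimal IR sketch (not taken from the patch; the
function name is made up) of the kind of half-wide vector code this change
affects:

  define <4 x i16> @add_v4i16(<4 x i16> %a, <4 x i16> %b) {
    ; With lane promotion, <4 x i16> is legalized to v4i32 and extra
    ; extends/masks appear when values move into and out of the vector.
    ; With widening, %a and %b simply occupy the low four lanes of a
    ; v8i16 and the unused high lanes are ignored.
    %r = add <4 x i16> %a, %b
    ret <4 x i16> %r
  }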

Half-wide vectors are now loaded from memory into the low 64 bits of the v128
rather than spread out among the lanes. As a result, v128.load64_zero is a much
more common operation, so add new patterns to support it.
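
For example, a plain load of a half-wide vector, as in the updated
simd-offset.ll tests below, now selects the 64-bit zeroing load (sketch only;
the function name is made up):

  define <8 x i8> @load_v8i8(<8 x i8>* %p) {
    ; Previously selected as an extending load (i16x8.load8x8_u); with the
    ; widened legalization this becomes v128.load64_zero, leaving the upper
    ; 64 bits of the v128 unused.
    %v = load <8 x i8>, <8 x i8>* %p
    ret <8 x i8> %v
  }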

Differential Revision: https://reviews.llvm.org/D107502

Added: 
    

Modified: 
    llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
    llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
    llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
    llvm/test/CodeGen/WebAssembly/simd-concat.ll
    llvm/test/CodeGen/WebAssembly/simd-extending.ll
    llvm/test/CodeGen/WebAssembly/simd-load-store-alignment.ll
    llvm/test/CodeGen/WebAssembly/simd-offset.ll

Removed: 
    llvm/test/CodeGen/WebAssembly/simd-nonconst-sext.ll
    llvm/test/CodeGen/WebAssembly/simd-scalar-to-vector.ll


################################################################################
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 927b83e8a067c..507895eb90ffb 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -303,9 +303,6 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
       setLoadExtAction(Ext, MVT::v4i32, MVT::v4i16, Legal);
       setLoadExtAction(Ext, MVT::v2i64, MVT::v2i32, Legal);
     }
-    // And some truncating stores are legal as well
-    setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
-    setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
   }
 
   // Don't do anything clever with build_pairs
@@ -854,6 +851,21 @@ void WebAssemblyTargetLowering::computeKnownBitsForTargetNode(
   }
 }
 
+TargetLoweringBase::LegalizeTypeAction
+WebAssemblyTargetLowering::getPreferredVectorAction(MVT VT) const {
+  if (VT.isFixedLengthVector()) {
+    MVT EltVT = VT.getVectorElementType();
+    // We have legal vector types with these lane types, so widening the
+    // vector would let us use some of the lanes directly without having to
+    // extend or truncate values.
+    if (EltVT == MVT::i8 || EltVT == MVT::i16 || EltVT == MVT::i32 ||
+        EltVT == MVT::i64 || EltVT == MVT::f32 || EltVT == MVT::f64)
+      return TypeWidenVector;
+  }
+
+  return TargetLoweringBase::getPreferredVectorAction(VT);
+}
+
 //===----------------------------------------------------------------------===//
 // WebAssembly Lowering private implementation.
 //===----------------------------------------------------------------------===//

diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
index 748b771d930f0..752a8a093d9c9 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.h
@@ -114,6 +114,9 @@ class WebAssemblyTargetLowering final : public TargetLowering {
                                      const SelectionDAG &DAG,
                                      unsigned Depth) const override;
 
+  TargetLoweringBase::LegalizeTypeAction
+  getPreferredVectorAction(MVT VT) const override;
+
   SDValue LowerCall(CallLoweringInfo &CLI,
                     SmallVectorImpl<SDValue> &InVals) const override;
   bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,

diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
index 6429b46673a61..3e0f20f757704 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyInstrSIMD.td
@@ -267,6 +267,16 @@ multiclass SIMDLoadZero<Vec vec, bits<32> simdop> {
 defm "" : SIMDLoadZero<I32x4, 0x5c>;
 defm "" : SIMDLoadZero<I64x2, 0x5d>;
 
+// Use load_zero to load scalars into vectors as well where possible.
+// TODO: i32, i16, and i8 scalars
+def load_scalar :
+  PatFrag<(ops node:$addr), (scalar_to_vector (i64 (load $addr)))>;
+defm : LoadPatNoOffset<v2i64, load_scalar, "LOAD_ZERO_I64x2">;
+defm : LoadPatImmOff<v2i64, load_scalar, regPlusImm, "LOAD_ZERO_I64x2">;
+defm : LoadPatImmOff<v2i64, load_scalar, or_is_add, "LOAD_ZERO_I64x2">;
+defm : LoadPatOffsetOnly<v2i64, load_scalar, "LOAD_ZERO_I64x2">;
+defm : LoadPatGlobalAddrOffOnly<v2i64, load_scalar, "LOAD_ZERO_I64x2">;
+
 // TODO: f32x4 and f64x2 as well
 foreach vec = [I32x4, I64x2] in {
   defvar inst = "LOAD_ZERO_"#vec;
@@ -1241,87 +1251,6 @@ multiclass SIMDNarrow<Vec vec, bits<32> baseInst> {
 defm "" : SIMDNarrow<I16x8, 101>;
 defm "" : SIMDNarrow<I32x4, 133>;
 
-// Use narrowing operations for truncating stores. Since the narrowing
-// operations are saturating instead of truncating, we need to mask
-// the stored values first.
-def store_v8i8_trunc_v8i16 :
-  OutPatFrag<(ops node:$val),
-             (EXTRACT_LANE_I64x2
-               (NARROW_U_I8x16
-                 (AND
-                   (CONST_V128_I16x8
-                     0x00ff, 0x00ff, 0x00ff, 0x00ff,
-                     0x00ff, 0x00ff, 0x00ff, 0x00ff),
-                   node:$val),
-                 $val), // Unused input
-               0)>;
-
-def store_v4i16_trunc_v4i32 :
-  OutPatFrag<(ops node:$val),
-             (EXTRACT_LANE_I64x2
-               (NARROW_U_I16x8
-                 (AND
-                  (CONST_V128_I32x4
-                    0x0000ffff, 0x0000ffff, 0x0000ffff, 0x0000ffff),
-                  node:$val),
-                 $val), // Unused input
-               0)>;
-
-// Store patterns adapted from WebAssemblyInstrMemory.td
-multiclass NarrowingStorePatNoOffset<Vec vec, OutPatFrag out> {
-  defvar node = !cast<PatFrag>("truncstorevi"#vec.split.lane_bits);
-  def : Pat<(node vec.vt:$val, I32:$addr),
-            (STORE_I64_A32 0, 0, $addr, (out $val))>,
-        Requires<[HasAddr32]>;
-  def : Pat<(node vec.vt:$val, I64:$addr),
-            (STORE_I64_A64 0, 0, $addr, (out $val))>,
-        Requires<[HasAddr64]>;
-}
-
-defm : NarrowingStorePatNoOffset<I16x8, store_v8i8_trunc_v8i16>;
-defm : NarrowingStorePatNoOffset<I32x4, store_v4i16_trunc_v4i32>;
-
-multiclass NarrowingStorePatImmOff<Vec vec, PatFrag operand, OutPatFrag out> {
-  defvar node = !cast<PatFrag>("truncstorevi"#vec.split.lane_bits);
-  def : Pat<(node vec.vt:$val, (operand I32:$addr, imm:$off)),
-            (STORE_I64_A32 0, imm:$off, $addr, (out $val))>,
-        Requires<[HasAddr32]>;
-  def : Pat<(node vec.vt:$val, (operand I64:$addr, imm:$off)),
-            (STORE_I64_A64 0, imm:$off, $addr, (out $val))>,
-        Requires<[HasAddr64]>;
-}
-
-defm : NarrowingStorePatImmOff<I16x8, regPlusImm, store_v8i8_trunc_v8i16>;
-defm : NarrowingStorePatImmOff<I32x4, regPlusImm, store_v4i16_trunc_v4i32>;
-defm : NarrowingStorePatImmOff<I16x8, or_is_add, store_v8i8_trunc_v8i16>;
-defm : NarrowingStorePatImmOff<I32x4, or_is_add, store_v4i16_trunc_v4i32>;
-
-multiclass NarrowingStorePatOffsetOnly<Vec vec, OutPatFrag out> {
-  defvar node = !cast<PatFrag>("truncstorevi"#vec.split.lane_bits);
-  def : Pat<(node vec.vt:$val, imm:$off),
-            (STORE_I64_A32 0, imm:$off, (CONST_I32 0), (out $val))>,
-        Requires<[HasAddr32]>;
-  def : Pat<(node vec.vt:$val, imm:$off),
-            (STORE_I64_A64 0, imm:$off, (CONST_I64 0), (out $val))>,
-        Requires<[HasAddr64]>;
-}
-
-defm : NarrowingStorePatOffsetOnly<I16x8, store_v8i8_trunc_v8i16>;
-defm : NarrowingStorePatOffsetOnly<I32x4, store_v4i16_trunc_v4i32>;
-
-multiclass NarrowingStorePatGlobalAddrOffOnly<Vec vec, OutPatFrag out> {
-  defvar node = !cast<PatFrag>("truncstorevi"#vec.split.lane_bits);
-  def : Pat<(node vec.vt:$val, (WebAssemblywrapper tglobaladdr:$off)),
-            (STORE_I64_A32 0, tglobaladdr:$off, (CONST_I32 0), (out $val))>,
-        Requires<[IsNotPIC, HasAddr32]>;
-  def : Pat<(node vec.vt:$val, (WebAssemblywrapper tglobaladdr:$off)),
-            (STORE_I64_A64  0, tglobaladdr:$off, (CONST_I64 0), (out $val))>,
-        Requires<[IsNotPIC, HasAddr64]>;
-}
-
-defm : NarrowingStorePatGlobalAddrOffOnly<I16x8, store_v8i8_trunc_v8i16>;
-defm : NarrowingStorePatGlobalAddrOffOnly<I32x4, store_v4i16_trunc_v4i32>;
-
 // Bitcasts are nops
 // Matching bitcast t1 to t1 causes strange errors, so avoid repeating types
 foreach t1 = AllVecs in

diff --git a/llvm/test/CodeGen/WebAssembly/simd-concat.ll b/llvm/test/CodeGen/WebAssembly/simd-concat.ll
index ede1868917860..ec7fc9836c3ef 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-concat.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-concat.ll
@@ -11,7 +11,7 @@ define <16 x i8> @concat_v8i8(<8 x i8> %a, <8 x i8> %b) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    i8x16.shuffle 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
+; CHECK-NEXT:    i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23
 ; CHECK-NEXT:    # fallthrough-return
   %v = shufflevector <8 x i8> %a, <8 x i8> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
   ret <16 x i8> %v
@@ -23,7 +23,7 @@ define <8 x i8> @concat_v4i8(<4 x i8> %a, <4 x i8> %b) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    i8x16.shuffle 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29
+; CHECK-NEXT:    i8x16.shuffle 0, 1, 2, 3, 16, 17, 18, 19, 0, 0, 0, 0, 0, 0, 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %v = shufflevector <4 x i8> %a, <4 x i8> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   ret <8 x i8> %v
@@ -35,7 +35,7 @@ define <8 x i16> @concat_v4i16(<4 x i16> %a, <4 x i16> %b) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    i8x16.shuffle 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29
+; CHECK-NEXT:    i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23
 ; CHECK-NEXT:    # fallthrough-return
   %v = shufflevector <4 x i16> %a, <4 x i16> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   ret <8 x i16> %v
@@ -47,7 +47,7 @@ define <4 x i8> @concat_v2i8(<2 x i8> %a, <2 x i8> %b) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    i8x16.shuffle 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27
+; CHECK-NEXT:    i8x16.shuffle 0, 1, 16, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %v = shufflevector <2 x i8> %a, <2 x i8> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x i8> %v
@@ -59,7 +59,7 @@ define <4 x i16> @concat_v2i16(<2 x i16> %a, <2 x i16> %b) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    i8x16.shuffle 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27
+; CHECK-NEXT:    i8x16.shuffle 0, 1, 2, 3, 16, 17, 18, 19, 0, 0, 0, 0, 0, 0, 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %v = shufflevector <2 x i16> %a, <2 x i16> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x i16> %v
@@ -71,7 +71,7 @@ define <4 x i32> @concat_v2i32(<2 x i32> %a, <2 x i32> %b) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    i8x16.shuffle 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27
+; CHECK-NEXT:    i8x16.shuffle 0, 1, 2, 3, 4, 5, 6, 7, 16, 17, 18, 19, 20, 21, 22, 23
 ; CHECK-NEXT:    # fallthrough-return
   %v = shufflevector <2 x i32> %a, <2 x i32> %b, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   ret <4 x i32> %v

diff --git a/llvm/test/CodeGen/WebAssembly/simd-extending.ll b/llvm/test/CodeGen/WebAssembly/simd-extending.ll
index 5cab13c0c61db..0caec7d0b0d0e 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-extending.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-extending.ll
@@ -169,29 +169,8 @@ define <8 x i16> @extend_lowish_i8x16_s(<16 x i8> %v) {
 ; CHECK:         .functype extend_lowish_i8x16_s (v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i8x16.extract_lane_u 1
-; CHECK-NEXT:    i16x8.splat
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i8x16.extract_lane_u 2
-; CHECK-NEXT:    i16x8.replace_lane 1
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i8x16.extract_lane_u 3
-; CHECK-NEXT:    i16x8.replace_lane 2
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i8x16.extract_lane_u 4
-; CHECK-NEXT:    i16x8.replace_lane 3
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i8x16.extract_lane_u 5
-; CHECK-NEXT:    i16x8.replace_lane 4
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i8x16.extract_lane_u 6
-; CHECK-NEXT:    i16x8.replace_lane 5
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i8x16.extract_lane_u 7
-; CHECK-NEXT:    i16x8.replace_lane 6
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i8x16.extract_lane_u 8
-; CHECK-NEXT:    i16x8.replace_lane 7
+; CHECK-NEXT:    i8x16.shuffle 1, 0, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 8, 0
 ; CHECK-NEXT:    i32.const 8
 ; CHECK-NEXT:    i16x8.shl
 ; CHECK-NEXT:    i32.const 8
@@ -208,17 +187,8 @@ define <4 x i32> @extend_lowish_i16x8_s(<8 x i16> %v) {
 ; CHECK:         .functype extend_lowish_i16x8_s (v128) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.extract_lane_u 1
-; CHECK-NEXT:    i32x4.splat
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.extract_lane_u 2
-; CHECK-NEXT:    i32x4.replace_lane 1
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.extract_lane_u 3
-; CHECK-NEXT:    i32x4.replace_lane 2
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.extract_lane_u 4
-; CHECK-NEXT:    i32x4.replace_lane 3
+; CHECK-NEXT:    i8x16.shuffle 2, 3, 0, 0, 4, 5, 0, 0, 6, 7, 0, 0, 8, 9, 0, 0
 ; CHECK-NEXT:    i32.const 16
 ; CHECK-NEXT:    i32x4.shl
 ; CHECK-NEXT:    i32.const 16

diff --git a/llvm/test/CodeGen/WebAssembly/simd-load-store-alignment.ll b/llvm/test/CodeGen/WebAssembly/simd-load-store-alignment.ll
index 60afb367cbbeb..b63900efb35e1 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-load-store-alignment.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-load-store-alignment.ll
@@ -294,7 +294,7 @@ define <8 x i8> @load_ext_v8i16_a1(<8 x i8>* %p) {
 ; CHECK:         .functype load_ext_v8i16_a1 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.load8x8_u 0:p2align=0
+; CHECK-NEXT:    v128.load64_zero 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
   %v = load <8 x i8>, <8 x i8>* %p, align 1
   ret <8 x i8> %v
@@ -305,7 +305,7 @@ define <8 x i8> @load_ext_v8i16_a2(<8 x i8>* %p) {
 ; CHECK:         .functype load_ext_v8i16_a2 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.load8x8_u 0:p2align=1
+; CHECK-NEXT:    v128.load64_zero 0:p2align=1
 ; CHECK-NEXT:    # fallthrough-return
   %v = load <8 x i8>, <8 x i8>* %p, align 2
   ret <8 x i8> %v
@@ -316,7 +316,7 @@ define <8 x i8> @load_ext_v8i16_a4(<8 x i8>* %p) {
 ; CHECK:         .functype load_ext_v8i16_a4 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.load8x8_u 0:p2align=2
+; CHECK-NEXT:    v128.load64_zero 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
   %v = load <8 x i8>, <8 x i8>* %p, align 4
   ret <8 x i8> %v
@@ -328,7 +328,7 @@ define <8 x i8> @load_ext_v8i16_a8(<8 x i8>* %p) {
 ; CHECK:         .functype load_ext_v8i16_a8 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.load8x8_u 0
+; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
   %v = load <8 x i8>, <8 x i8>* %p, align 8
   ret <8 x i8> %v
@@ -340,7 +340,7 @@ define <8 x i8> @load_ext_v8i16_a16(<8 x i8>* %p) {
 ; CHECK:         .functype load_ext_v8i16_a16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.load8x8_u 0
+; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
   %v = load <8 x i8>, <8 x i8>* %p, align 16
   ret <8 x i8> %v
@@ -636,7 +636,7 @@ define <4 x i16> @load_ext_v4i32_a1(<4 x i16>* %p) {
 ; CHECK:         .functype load_ext_v4i32_a1 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i32x4.load16x4_u 0:p2align=0
+; CHECK-NEXT:    v128.load64_zero 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
   %v = load <4 x i16>, <4 x i16>* %p, align 1
   ret <4 x i16> %v
@@ -647,7 +647,7 @@ define <4 x i16> @load_ext_v4i32_a2(<4 x i16>* %p) {
 ; CHECK:         .functype load_ext_v4i32_a2 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i32x4.load16x4_u 0:p2align=1
+; CHECK-NEXT:    v128.load64_zero 0:p2align=1
 ; CHECK-NEXT:    # fallthrough-return
   %v = load <4 x i16>, <4 x i16>* %p, align 2
   ret <4 x i16> %v
@@ -658,7 +658,7 @@ define <4 x i16> @load_ext_v4i32_a4(<4 x i16>* %p) {
 ; CHECK:         .functype load_ext_v4i32_a4 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i32x4.load16x4_u 0:p2align=2
+; CHECK-NEXT:    v128.load64_zero 0:p2align=2
 ; CHECK-NEXT:    # fallthrough-return
   %v = load <4 x i16>, <4 x i16>* %p, align 4
   ret <4 x i16> %v
@@ -670,7 +670,7 @@ define <4 x i16> @load_ext_v4i32_a8(<4 x i16>* %p) {
 ; CHECK:         .functype load_ext_v4i32_a8 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i32x4.load16x4_u 0
+; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
   %v = load <4 x i16>, <4 x i16>* %p, align 8
   ret <4 x i16> %v
@@ -682,7 +682,7 @@ define <4 x i16> @load_ext_v4i32_a16(<4 x i16>* %p) {
 ; CHECK:         .functype load_ext_v4i32_a16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i32x4.load16x4_u 0
+; CHECK-NEXT:    v128.load 0
 ; CHECK-NEXT:    # fallthrough-return
   %v = load <4 x i16>, <4 x i16>* %p, align 16
   ret <4 x i16> %v

diff --git a/llvm/test/CodeGen/WebAssembly/simd-nonconst-sext.ll b/llvm/test/CodeGen/WebAssembly/simd-nonconst-sext.ll
deleted file mode 100644
index b8c734ec3aa89..0000000000000
--- a/llvm/test/CodeGen/WebAssembly/simd-nonconst-sext.ll
+++ /dev/null
@@ -1,19 +0,0 @@
-; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -mattr=+simd128 | FileCheck %s
-
-; A regression test for a bug in the lowering of SIGN_EXTEND_INREG
-; with SIMD and without sign-ext where ISel would crash if the index
-; of the vector extract was not a constant.
-
-target triple = "wasm32"
-
-; CHECK-LABEL: foo:
-; CHECK-NEXT: .functype foo () -> (f32)
-; CHECK: i32x4.load16x4_u
-; CHECK: f32.convert_i32_s
-define float @foo() {
-  %1 = load <4 x i16>, <4 x i16>* undef, align 8
-  %2 = load i32, i32* undef, align 4
-  %vecext = extractelement <4 x i16> %1, i32 %2
-  %conv = sitofp i16 %vecext to float
-  ret float %conv
-}

diff --git a/llvm/test/CodeGen/WebAssembly/simd-offset.ll b/llvm/test/CodeGen/WebAssembly/simd-offset.ll
index b72ded143a296..62d32b6be4f7a 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-offset.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-offset.ll
@@ -402,7 +402,7 @@ define <8 x i8> @load_ext_v8i16(<8 x i8>* %p) {
 ; CHECK:         .functype load_ext_v8i16 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.load8x8_u 0
+; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
   %v = load <8 x i8>, <8 x i8>* %p
   ret <8 x i8> %v
@@ -473,7 +473,7 @@ define <8 x i8> @load_ext_v8i16_with_folded_offset(<8 x i8>* %p) {
 ; CHECK:         .functype load_ext_v8i16_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.load8x8_u 16
+; CHECK-NEXT:    v128.load64_zero 16
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint <8 x i8>* %p to i32
   %r = add nuw i32 %q, 16
@@ -539,7 +539,7 @@ define <8 x i8> @load_ext_v8i16_with_folded_gep_offset(<8 x i8>* %p) {
 ; CHECK:         .functype load_ext_v8i16_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.load8x8_u 8
+; CHECK-NEXT:    v128.load64_zero 8
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds <8 x i8>, <8 x i8>* %p, i32 1
   %v = load <8 x i8>, <8 x i8>* %s
@@ -613,7 +613,7 @@ define <8 x i8> @load_ext_v8i16_with_unfolded_gep_negative_offset(<8 x i8>* %p)
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32.const -8
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    i16x8.load8x8_u 0
+; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds <8 x i8>, <8 x i8>* %p, i32 -1
   %v = load <8 x i8>, <8 x i8>* %s
@@ -695,7 +695,7 @@ define <8 x i8> @load_ext_v8i16_with_unfolded_offset(<8 x i8>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32.const 16
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    i16x8.load8x8_u 0
+; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint <8 x i8>* %p to i32
   %r = add nsw i32 %q, 16
@@ -771,7 +771,7 @@ define <8 x i8> @load_ext_v8i16_with_unfolded_gep_offset(<8 x i8>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32.const 8
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    i16x8.load8x8_u 0
+; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr <8 x i8>, <8 x i8>* %p, i32 1
   %v = load <8 x i8>, <8 x i8>* %s
@@ -835,7 +835,7 @@ define <8 x i8> @load_ext_v8i16_from_numeric_address() {
 ; CHECK:         .functype load_ext_v8i16_from_numeric_address () -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    i16x8.load8x8_u 32
+; CHECK-NEXT:    v128.load64_zero 32
 ; CHECK-NEXT:    # fallthrough-return
   %s = inttoptr i32 32 to <8 x i8>*
   %v = load <8 x i8>, <8 x i8>* %s
@@ -898,7 +898,7 @@ define <8 x i8> @load_ext_v8i16_from_global_address() {
 ; CHECK:         .functype load_ext_v8i16_from_global_address () -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    i16x8.load8x8_u gv_v8i8
+; CHECK-NEXT:    v128.load64_zero gv_v8i8
 ; CHECK-NEXT:    # fallthrough-return
   %v = load <8 x i8>, <8 x i8>* @gv_v8i8
   ret <8 x i8> %v
@@ -922,13 +922,8 @@ define void @store_narrowing_v8i16(<8 x i8> %v, <8 x i8>* %p) {
 ; CHECK:         .functype store_narrowing_v8i16 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.const 255, 255, 255, 255, 255, 255, 255, 255
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.and
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i8x16.narrow_i16x8_u
-; CHECK-NEXT:    i64x2.extract_lane 0
-; CHECK-NEXT:    i64.store 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   store <8 x i8> %v, <8 x i8>* %p
   ret void
@@ -954,13 +949,10 @@ define void @store_narrowing_v8i16_with_folded_offset(<8 x i8> %v, <8 x i8>* %p)
 ; CHECK:         .functype store_narrowing_v8i16_with_folded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.const 255, 255, 255, 255, 255, 255, 255, 255
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.and
+; CHECK-NEXT:    i32.const 16
+; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i8x16.narrow_i16x8_u
-; CHECK-NEXT:    i64x2.extract_lane 0
-; CHECK-NEXT:    i64.store 16
+; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint <8 x i8>* %p to i32
   %r = add nuw i32 %q, 16
@@ -987,13 +979,10 @@ define void @store_narrowing_v8i16_with_folded_gep_offset(<8 x i8> %v, <8 x i8>*
 ; CHECK:         .functype store_narrowing_v8i16_with_folded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.const 255, 255, 255, 255, 255, 255, 255, 255
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.and
+; CHECK-NEXT:    i32.const 8
+; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i8x16.narrow_i16x8_u
-; CHECK-NEXT:    i64x2.extract_lane 0
-; CHECK-NEXT:    i64.store 8
+; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds <8 x i8>, <8 x i8>* %p, i32 1
   store <8 x i8> %v , <8 x i8>* %s
@@ -1022,13 +1011,8 @@ define void @store_narrowing_v8i16_with_unfolded_gep_negative_offset(<8 x i8> %v
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    i32.const -8
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    v128.const 255, 255, 255, 255, 255, 255, 255, 255
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.and
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i8x16.narrow_i16x8_u
-; CHECK-NEXT:    i64x2.extract_lane 0
-; CHECK-NEXT:    i64.store 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds <8 x i8>, <8 x i8>* %p, i32 -1
   store <8 x i8> %v , <8 x i8>* %s
@@ -1059,13 +1043,8 @@ define void @store_narrowing_v8i16_with_unfolded_offset(<8 x i8> %v, <8 x i8>* %
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    i32.const 16
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    v128.const 255, 255, 255, 255, 255, 255, 255, 255
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.and
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i8x16.narrow_i16x8_u
-; CHECK-NEXT:    i64x2.extract_lane 0
-; CHECK-NEXT:    i64.store 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint <8 x i8>* %p to i32
   %r = add nsw i32 %q, 16
@@ -1096,13 +1075,8 @@ define void @store_narrowing_v8i16_with_unfolded_gep_offset(<8 x i8> %v, <8 x i8
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    i32.const 8
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    v128.const 255, 255, 255, 255, 255, 255, 255, 255
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.and
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i8x16.narrow_i16x8_u
-; CHECK-NEXT:    i64x2.extract_lane 0
-; CHECK-NEXT:    i64.store 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr <8 x i8>, <8 x i8>* %p, i32 1
   store <8 x i8> %v , <8 x i8>* %s
@@ -1126,14 +1100,9 @@ define void @store_narrowing_v8i16_to_numeric_address(<8 x i8> %v, <8 x i8>* %p)
 ; CHECK-LABEL: store_narrowing_v8i16_to_numeric_address:
 ; CHECK:         .functype store_narrowing_v8i16_to_numeric_address (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
-; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    v128.const 255, 255, 255, 255, 255, 255, 255, 255
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.and
+; CHECK-NEXT:    i32.const 32
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i8x16.narrow_i16x8_u
-; CHECK-NEXT:    i64x2.extract_lane 0
-; CHECK-NEXT:    i64.store 32
+; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = inttoptr i32 32 to <8 x i8>*
   store <8 x i8> %v , <8 x i8>* %s
@@ -1156,14 +1125,9 @@ define void @store_narrowing_v8i16_to_global_address(<8 x i8> %v) {
 ; CHECK-LABEL: store_narrowing_v8i16_to_global_address:
 ; CHECK:         .functype store_narrowing_v8i16_to_global_address (v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
-; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    v128.const 255, 255, 255, 255, 255, 255, 255, 255
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.and
+; CHECK-NEXT:    i32.const gv_v8i8
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i8x16.narrow_i16x8_u
-; CHECK-NEXT:    i64x2.extract_lane 0
-; CHECK-NEXT:    i64.store gv_v8i8
+; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   store <8 x i8> %v , <8 x i8>* @gv_v8i8
   ret void
@@ -1225,7 +1189,7 @@ define <4 x i16> @load_ext_v4i32(<4 x i16>* %p) {
 ; CHECK:         .functype load_ext_v4i32 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i32x4.load16x4_u 0
+; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
   %v = load <4 x i16>, <4 x i16>* %p
   ret <4 x i16> %v
@@ -1296,7 +1260,7 @@ define <4 x i16> @load_ext_v4i32_with_folded_offset(<4 x i16>* %p) {
 ; CHECK:         .functype load_ext_v4i32_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i32x4.load16x4_u 16
+; CHECK-NEXT:    v128.load64_zero 16
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint <4 x i16>* %p to i32
   %r = add nuw i32 %q, 16
@@ -1362,7 +1326,7 @@ define <4 x i16> @load_ext_v4i32_with_folded_gep_offset(<4 x i16>* %p) {
 ; CHECK:         .functype load_ext_v4i32_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i32x4.load16x4_u 8
+; CHECK-NEXT:    v128.load64_zero 8
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds <4 x i16>, <4 x i16>* %p, i32 1
   %v = load <4 x i16>, <4 x i16>* %s
@@ -1436,7 +1400,7 @@ define <4 x i16> @load_ext_v4i32_with_unfolded_gep_negative_offset(<4 x i16>* %p
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32.const -8
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    i32x4.load16x4_u 0
+; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds <4 x i16>, <4 x i16>* %p, i32 -1
   %v = load <4 x i16>, <4 x i16>* %s
@@ -1518,7 +1482,7 @@ define <4 x i16> @load_ext_v4i32_with_unfolded_offset(<4 x i16>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32.const 16
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    i32x4.load16x4_u 0
+; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint <4 x i16>* %p to i32
   %r = add nsw i32 %q, 16
@@ -1594,7 +1558,7 @@ define <4 x i16> @load_ext_v4i32_with_unfolded_gep_offset(<4 x i16>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32.const 8
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    i32x4.load16x4_u 0
+; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr <4 x i16>, <4 x i16>* %p, i32 1
   %v = load <4 x i16>, <4 x i16>* %s
@@ -1658,7 +1622,7 @@ define <4 x i16> @load_ext_v4i32_from_numeric_address() {
 ; CHECK:         .functype load_ext_v4i32_from_numeric_address () -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    i32x4.load16x4_u 32
+; CHECK-NEXT:    v128.load64_zero 32
 ; CHECK-NEXT:    # fallthrough-return
   %s = inttoptr i32 32 to <4 x i16>*
   %v = load <4 x i16>, <4 x i16>* %s
@@ -1721,7 +1685,7 @@ define <4 x i16> @load_ext_v4i32_from_global_address() {
 ; CHECK:         .functype load_ext_v4i32_from_global_address () -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    i32x4.load16x4_u gv_v4i16
+; CHECK-NEXT:    v128.load64_zero gv_v4i16
 ; CHECK-NEXT:    # fallthrough-return
   %v = load <4 x i16>, <4 x i16>* @gv_v4i16
   ret <4 x i16> %v
@@ -1744,13 +1708,8 @@ define void @store_narrowing_v4i32(<4 x i16> %v, <4 x i16>* %p) {
 ; CHECK:         .functype store_narrowing_v4i32 (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.const 65535, 65535, 65535, 65535
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.and
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.narrow_i32x4_u
-; CHECK-NEXT:    i64x2.extract_lane 0
-; CHECK-NEXT:    i64.store 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   store <4 x i16> %v , <4 x i16>* %p
   ret void
@@ -1776,13 +1735,10 @@ define void @store_narrowing_v4i32_with_folded_offset(<4 x i16> %v, <4 x i16>* %
 ; CHECK:         .functype store_narrowing_v4i32_with_folded_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.const 65535, 65535, 65535, 65535
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.and
+; CHECK-NEXT:    i32.const 16
+; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.narrow_i32x4_u
-; CHECK-NEXT:    i64x2.extract_lane 0
-; CHECK-NEXT:    i64.store 16
+; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint <4 x i16>* %p to i32
   %r = add nuw i32 %q, 16
@@ -1809,13 +1765,10 @@ define void @store_narrowing_v4i32_with_folded_gep_offset(<4 x i16> %v, <4 x i16
 ; CHECK:         .functype store_narrowing_v4i32_with_folded_gep_offset (v128, i32) -> ()
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.const 65535, 65535, 65535, 65535
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.and
+; CHECK-NEXT:    i32.const 8
+; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.narrow_i32x4_u
-; CHECK-NEXT:    i64x2.extract_lane 0
-; CHECK-NEXT:    i64.store 8
+; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds <4 x i16>, <4 x i16>* %p, i32 1
   store <4 x i16> %v , <4 x i16>* %s
@@ -1844,13 +1797,8 @@ define void @store_narrowing_v4i32_with_unfolded_gep_negative_offset(<4 x i16> %
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    i32.const -8
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    v128.const 65535, 65535, 65535, 65535
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.and
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.narrow_i32x4_u
-; CHECK-NEXT:    i64x2.extract_lane 0
-; CHECK-NEXT:    i64.store 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds <4 x i16>, <4 x i16>* %p, i32 -1
   store <4 x i16> %v , <4 x i16>* %s
@@ -1881,13 +1829,8 @@ define void @store_narrowing_v4i32_with_unfolded_offset(<4 x i16> %v, <4 x i16>*
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    i32.const 16
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    v128.const 65535, 65535, 65535, 65535
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.and
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.narrow_i32x4_u
-; CHECK-NEXT:    i64x2.extract_lane 0
-; CHECK-NEXT:    i64.store 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint <4 x i16>* %p to i32
   %r = add nsw i32 %q, 16
@@ -1918,13 +1861,8 @@ define void @store_narrowing_v4i32_with_unfolded_gep_offset(<4 x i16> %v, <4 x i
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    i32.const 8
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    v128.const 65535, 65535, 65535, 65535
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.and
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.narrow_i32x4_u
-; CHECK-NEXT:    i64x2.extract_lane 0
-; CHECK-NEXT:    i64.store 0
+; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr <4 x i16>, <4 x i16>* %p, i32 1
   store <4 x i16> %v , <4 x i16>* %s
@@ -1948,14 +1886,9 @@ define void @store_narrowing_v4i32_to_numeric_address(<4 x i16> %v) {
 ; CHECK-LABEL: store_narrowing_v4i32_to_numeric_address:
 ; CHECK:         .functype store_narrowing_v4i32_to_numeric_address (v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
-; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    v128.const 65535, 65535, 65535, 65535
+; CHECK-NEXT:    i32.const 32
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.and
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.narrow_i32x4_u
-; CHECK-NEXT:    i64x2.extract_lane 0
-; CHECK-NEXT:    i64.store 32
+; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = inttoptr i32 32 to <4 x i16>*
   store <4 x i16> %v , <4 x i16>* %s
@@ -1978,14 +1911,9 @@ define void @store_narrowing_v4i32_to_global_address(<4 x i16> %v) {
 ; CHECK-LABEL: store_narrowing_v4i32_to_global_address:
 ; CHECK:         .functype store_narrowing_v4i32_to_global_address (v128) -> ()
 ; CHECK-NEXT:  # %bb.0:
-; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    v128.const 65535, 65535, 65535, 65535
+; CHECK-NEXT:    i32.const gv_v4i16
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.and
-; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i16x8.narrow_i32x4_u
-; CHECK-NEXT:    i64x2.extract_lane 0
-; CHECK-NEXT:    i64.store gv_v4i16
+; CHECK-NEXT:    v128.store64_lane 0, 0
 ; CHECK-NEXT:    # fallthrough-return
   store <4 x i16> %v , <4 x i16>* @gv_v4i16
   ret void
@@ -2047,7 +1975,7 @@ define <2 x i32> @load_ext_v2i64(<2 x i32>* %p) {
 ; CHECK:         .functype load_ext_v2i64 (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i64x2.load32x2_u 0
+; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
   %v = load <2 x i32>, <2 x i32>* %p
   ret <2 x i32> %v
@@ -2118,7 +2046,7 @@ define <2 x i32> @load_ext_v2i64_with_folded_offset(<2 x i32>* %p) {
 ; CHECK:         .functype load_ext_v2i64_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i64x2.load32x2_u 16
+; CHECK-NEXT:    v128.load64_zero 16
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint <2 x i32>* %p to i32
   %r = add nuw i32 %q, 16
@@ -2184,7 +2112,7 @@ define <2 x i32> @load_ext_v2i64_with_folded_gep_offset(<2 x i32>* %p) {
 ; CHECK:         .functype load_ext_v2i64_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    i64x2.load32x2_u 8
+; CHECK-NEXT:    v128.load64_zero 8
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds <2 x i32>, <2 x i32>* %p, i32 1
   %v = load <2 x i32>, <2 x i32>* %s
@@ -2258,7 +2186,7 @@ define <2 x i32> @load_ext_v2i64_with_unfolded_gep_negative_offset(<2 x i32>* %p
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32.const -8
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    i64x2.load32x2_u 0
+; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds <2 x i32>, <2 x i32>* %p, i32 -1
   %v = load <2 x i32>, <2 x i32>* %s
@@ -2340,7 +2268,7 @@ define <2 x i32> @load_ext_v2i64_with_unfolded_offset(<2 x i32>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32.const 16
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    i64x2.load32x2_u 0
+; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint <2 x i32>* %p to i32
   %r = add nsw i32 %q, 16
@@ -2416,7 +2344,7 @@ define <2 x i32> @load_ext_v2i64_with_unfolded_gep_offset(<2 x i32>* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32.const 8
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    i64x2.load32x2_u 0
+; CHECK-NEXT:    v128.load64_zero 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr <2 x i32>, <2 x i32>* %p, i32 1
   %v = load <2 x i32>, <2 x i32>* %s
@@ -2480,7 +2408,7 @@ define <2 x i32> @load_ext_v2i64_from_numeric_address() {
 ; CHECK:         .functype load_ext_v2i64_from_numeric_address () -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    i64x2.load32x2_u 32
+; CHECK-NEXT:    v128.load64_zero 32
 ; CHECK-NEXT:    # fallthrough-return
   %s = inttoptr i32 32 to <2 x i32>*
   %v = load <2 x i32>, <2 x i32>* %s
@@ -2543,7 +2471,7 @@ define <2 x i32> @load_ext_v2i64_from_global_address() {
 ; CHECK:         .functype load_ext_v2i64_from_global_address () -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    i64x2.load32x2_u gv_v2i32
+; CHECK-NEXT:    v128.load64_zero gv_v2i32
 ; CHECK-NEXT:    # fallthrough-return
   %v = load <2 x i32>, <2 x i32>* @gv_v2i32
   ret <2 x i32> %v

diff --git a/llvm/test/CodeGen/WebAssembly/simd-scalar-to-vector.ll b/llvm/test/CodeGen/WebAssembly/simd-scalar-to-vector.ll
deleted file mode 100644
index 70982c763614b..0000000000000
--- a/llvm/test/CodeGen/WebAssembly/simd-scalar-to-vector.ll
+++ /dev/null
@@ -1,42 +0,0 @@
-; RUN: llc < %s -asm-verbose=false -verify-machineinstrs -disable-wasm-fallthrough-return-opt -wasm-disable-explicit-locals -wasm-keep-registers -mattr=+simd128 | FileCheck %s
-
-; Test that scalar_to_vector is lowered into a splat correctly.
-; This bugpoint-reduced code turns into the selection dag below.
-; TODO: find small test cases that produce scalar_to_vector dag nodes
-; to make this test more readable and comprehensive.
-
-;   t0: ch = EntryToken
-; t32: i32,ch = load<(load 4 from `<2 x i16>* undef`, align 1)> t0, undef:i32, undef:i32
-;   t33: v4i32 = scalar_to_vector t32
-; t34: v8i16 = bitcast t33
-;       t51: i32 = extract_vector_elt t34, Constant:i32<0>
-;   t52: ch = store<(store 2 into `<4 x i16>* undef`, align 1), trunc to i16> t32:1, t51, undef:i32, undef:i32
-;       t50: i32 = extract_vector_elt t34, Constant:i32<1>
-;     t53: ch = store<(store 2 into `<4 x i16>* undef` + 2, align 1), trunc to i16> t32:1, t50, undef:i32, undef:i32
-;       t49: i32 = extract_vector_elt t34, Constant:i32<2>
-;     t55: ch = store<(store 2 into `<4 x i16>* undef` + 4, align 1), trunc to i16> t32:1, t49, undef:i32, undef:i32
-;       t48: i32 = extract_vector_elt t34, Constant:i32<3>
-;     t57: ch = store<(store 2 into `<4 x i16>* undef` + 6, align 1), trunc to i16> t32:1, t48, undef:i32, undef:i32
-;   t58: ch = TokenFactor t52, t53, t55, t57
-; t24: ch = WebAssemblyISD::RETURN t58
-
-target triple = "wasm32-unknown-unknown"
-
-; CHECK-LABEL: foo:
-; CHECK: i64x2.splat
-define void @foo() {
-entry:
-  %a = load <2 x i16>, <2 x i16>* undef, align 1
-  %b = shufflevector <2 x i16> %a, <2 x i16> undef, <8 x i32> <i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
-  %0 = bitcast <8 x i16> %b to <16 x i8>
-  %shuffle.i214 = shufflevector <16 x i8> %0, <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef>, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
-  %1 = bitcast <16 x i8> %shuffle.i214 to <8 x i16>
-  %add82 = add <8 x i16> %1, zeroinitializer
-  %2 = select <8 x i1> undef, <8 x i16> undef, <8 x i16> %add82
-  %3 = bitcast <8 x i16> %2 to <16 x i8>
-  %shuffle.i204 = shufflevector <16 x i8> %3, <16 x i8> undef, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
-  %4 = bitcast <16 x i8> %shuffle.i204 to <8 x i16>
-  %dst2.0.vec.extract = shufflevector <8 x i16> %4, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-  store <4 x i16> %dst2.0.vec.extract, <4 x i16>* undef, align 1
-  ret void
-}
