[llvm] 89333b3 - [WebAssembly] Set alignment to 1 for SIMD memory intrinsics

Thomas Lively via llvm-commits llvm-commits at lists.llvm.org
Wed May 5 11:59:40 PDT 2021


Author: Thomas Lively
Date: 2021-05-05T11:59:33-07:00
New Revision: 89333b35a7a909d29ae53fddcfb4792d87223b96

URL: https://github.com/llvm/llvm-project/commit/89333b35a7a909d29ae53fddcfb4792d87223b96
DIFF: https://github.com/llvm/llvm-project/commit/89333b35a7a909d29ae53fddcfb4792d87223b96.diff

LOG: [WebAssembly] Set alignment to 1 for SIMD memory intrinsics

The WebAssembly SIMD intrinsics in wasm_simd128.h generally avoid requiring any
particular alignment for memory operations so that they are maximally flexible.
For the builtin memory access functions and their corresponding LLVM IR
intrinsics, there is no way to communicate the expected alignment, so the best
we can do is set the alignment to 1 in the backend. With this change, the
alignment hints in the emitted code are no longer incorrect when users access
unaligned data through the intrinsics.
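
For illustration, here is a minimal C sketch (not part of this patch) of the
kind of access this affects. It assumes a wasm toolchain whose wasm_simd128.h
provides wasm_v128_load32_zero and compilation with something like
clang --target=wasm32 -msimd128 -O2:

    // Hypothetical example, not from this patch: load a 32-bit value from a
    // potentially unaligned address into the low lane of a vector, zeroing
    // the remaining lanes. With this change the emitted v128.load32_zero
    // carries p2align=0 instead of incorrectly claiming 4-byte alignment.
    #include <stdint.h>
    #include <wasm_simd128.h>

    v128_t load_low_word_unaligned(const uint8_t *buf) {
      // buf + 1 is in general only 1-byte aligned.
      return wasm_v128_load32_zero(buf + 1);
    }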

Differential Revision: https://reviews.llvm.org/D101850

Added: 
    

Modified: 
    llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
    llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll
    llvm/test/CodeGen/WebAssembly/simd-load-zero-offset.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 51e621dbcdc6b..9bcee59253401 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -724,7 +724,7 @@ bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.memVT = Intrinsic == Intrinsic::wasm_load32_zero ? MVT::i32 : MVT::i64;
     Info.ptrVal = I.getArgOperand(0);
     Info.offset = 0;
-    Info.align = Info.memVT == MVT::i32 ? Align(4) : Align(8);
+    Info.align = Align(1);
     Info.flags = MachineMemOperand::MOLoad;
     return true;
   case Intrinsic::wasm_load8_lane:
@@ -736,27 +736,22 @@ bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   case Intrinsic::wasm_store32_lane:
   case Intrinsic::wasm_store64_lane: {
     MVT MemVT;
-    Align MemAlign;
     switch (Intrinsic) {
     case Intrinsic::wasm_load8_lane:
     case Intrinsic::wasm_store8_lane:
       MemVT = MVT::i8;
-      MemAlign = Align(1);
       break;
     case Intrinsic::wasm_load16_lane:
     case Intrinsic::wasm_store16_lane:
       MemVT = MVT::i16;
-      MemAlign = Align(2);
       break;
     case Intrinsic::wasm_load32_lane:
     case Intrinsic::wasm_store32_lane:
       MemVT = MVT::i32;
-      MemAlign = Align(4);
       break;
     case Intrinsic::wasm_load64_lane:
     case Intrinsic::wasm_store64_lane:
       MemVT = MVT::i64;
-      MemAlign = Align(8);
       break;
     default:
       llvm_unreachable("unexpected intrinsic");
@@ -774,7 +769,7 @@ bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.ptrVal = I.getArgOperand(0);
     Info.memVT = MemVT;
     Info.offset = 0;
-    Info.align = MemAlign;
+    Info.align = Align(1);
     return true;
   }
   default:

diff --git a/llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll b/llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll
index 08c3f80e57bf8..a91a864e0421f 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-load-lane-offset.ll
@@ -266,7 +266,7 @@ define <8 x i16> @load_lane_i16_no_offset(i16* %p, <8 x i16> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    v128.load16_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %p, <8 x i16> %v, i32 0)
   ret <8 x i16> %t
@@ -280,7 +280,7 @@ define <8 x i16> @load_lane_i16_with_folded_offset(i16* %p, <8 x i16> %v) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    v128.load16_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i16* %p to i32
   %r = add nuw i32 %q, 24
@@ -297,7 +297,7 @@ define <8 x i16> @load_lane_i16_with_folded_gep_offset(i16* %p, <8 x i16> %v) {
 ; CHECK-NEXT:    i32.const 12
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    v128.load16_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i16, i16* %p, i32 6
   %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
@@ -312,7 +312,7 @@ define <8 x i16> @load_lane_i16_with_unfolded_gep_negative_offset(i16* %p, <8 x
 ; CHECK-NEXT:    i32.const -12
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    v128.load16_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i16, i16* %p, i32 -6
   %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
@@ -327,7 +327,7 @@ define <8 x i16> @load_lane_i16_with_unfolded_offset(i16* %p, <8 x i16> %v) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    v128.load16_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i16* %p to i32
   %r = add nsw i32 %q, 24
@@ -344,7 +344,7 @@ define <8 x i16> @load_lane_i16_with_unfolded_gep_offset(i16* %p, <8 x i16> %v)
 ; CHECK-NEXT:    i32.const 12
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    v128.load16_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr i16, i16* %p, i32 6
   %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
@@ -357,7 +357,7 @@ define <8 x i16> @load_lane_i16_from_numeric_address(<8 x i16> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 42
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    v128.load16_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = inttoptr i32 42 to i16*
   %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* %s, <8 x i16> %v, i32 0)
@@ -371,7 +371,7 @@ define <8 x i16> @load_lane_i16_from_global_address(<8 x i16> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const gv_i16
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.load16_lane 0, 0
+; CHECK-NEXT:    v128.load16_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %t = tail call <8 x i16> @llvm.wasm.load16.lane(i16* @gv_i16, <8 x i16> %v, i32 0)
   ret <8 x i16> %t
@@ -383,7 +383,7 @@ define void @store_lane_i16_no_offset(<8 x i16> %v, i16* %p) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    v128.store16_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   tail call void @llvm.wasm.store16.lane(i16* %p, <8 x i16> %v, i32 0)
   ret void
@@ -397,7 +397,7 @@ define void @store_lane_i16_with_folded_offset(<8 x i16> %v, i16* %p) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    v128.store16_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i16* %p to i32
   %r = add nuw i32 %q, 24
@@ -414,7 +414,7 @@ define void @store_lane_i16_with_folded_gep_offset(<8 x i16> %v, i16* %p) {
 ; CHECK-NEXT:    i32.const 12
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    v128.store16_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i16, i16* %p, i32 6
   tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
@@ -429,7 +429,7 @@ define void @store_lane_i16_with_unfolded_gep_negative_offset(<8 x i16> %v, i16*
 ; CHECK-NEXT:    i32.const -12
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    v128.store16_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i16, i16* %p, i32 -6
   tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
@@ -444,7 +444,7 @@ define void @store_lane_i16_with_unfolded_offset(<8 x i16> %v, i16* %p) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    v128.store16_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i16* %p to i32
   %r = add nsw i32 %q, 24
@@ -461,7 +461,7 @@ define void @store_lane_i16_with_unfolded_gep_offset(<8 x i16> %v, i16* %p) {
 ; CHECK-NEXT:    i32.const 12
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    v128.store16_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr i16, i16* %p, i32 6
   tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
@@ -474,7 +474,7 @@ define void @store_lane_i16_to_numeric_address(<8 x i16> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 42
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    v128.store16_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = inttoptr i32 42 to i16*
   tail call void @llvm.wasm.store16.lane(i16* %s, <8 x i16> %v, i32 0)
@@ -487,7 +487,7 @@ define void @store_lane_i16_from_global_address(<8 x i16> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const gv_i16
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store16_lane 0, 0
+; CHECK-NEXT:    v128.store16_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   tail call void @llvm.wasm.store16.lane(i16* @gv_i16, <8 x i16> %v, i32 0)
   ret void
@@ -503,7 +503,7 @@ define <4 x i32> @load_lane_i32_no_offset(i32* %p, <4 x i32> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    v128.load32_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %p, <4 x i32> %v, i32 0)
   ret <4 x i32> %t
@@ -517,7 +517,7 @@ define <4 x i32> @load_lane_i32_with_folded_offset(i32* %p, <4 x i32> %v) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    v128.load32_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i32* %p to i32
   %r = add nuw i32 %q, 24
@@ -534,7 +534,7 @@ define <4 x i32> @load_lane_i32_with_folded_gep_offset(i32* %p, <4 x i32> %v) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    v128.load32_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i32, i32* %p, i32 6
   %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
@@ -549,7 +549,7 @@ define <4 x i32> @load_lane_i32_with_unfolded_gep_negative_offset(i32* %p, <4 x
 ; CHECK-NEXT:    i32.const -24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    v128.load32_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i32, i32* %p, i32 -6
   %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
@@ -564,7 +564,7 @@ define <4 x i32> @load_lane_i32_with_unfolded_offset(i32* %p, <4 x i32> %v) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    v128.load32_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i32* %p to i32
   %r = add nsw i32 %q, 24
@@ -581,7 +581,7 @@ define <4 x i32> @load_lane_i32_with_unfolded_gep_offset(i32* %p, <4 x i32> %v)
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    v128.load32_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr i32, i32* %p, i32 6
   %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
@@ -594,7 +594,7 @@ define <4 x i32> @load_lane_i32_from_numeric_address(<4 x i32> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 42
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    v128.load32_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = inttoptr i32 42 to i32*
   %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* %s, <4 x i32> %v, i32 0)
@@ -608,7 +608,7 @@ define <4 x i32> @load_lane_i32_from_global_address(<4 x i32> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const gv_i32
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.load32_lane 0, 0
+; CHECK-NEXT:    v128.load32_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %t = tail call <4 x i32> @llvm.wasm.load32.lane(i32* @gv_i32, <4 x i32> %v, i32 0)
   ret <4 x i32> %t
@@ -620,7 +620,7 @@ define void @store_lane_i32_no_offset(<4 x i32> %v, i32* %p) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    v128.store32_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   tail call void @llvm.wasm.store32.lane(i32* %p, <4 x i32> %v, i32 0)
   ret void
@@ -634,7 +634,7 @@ define void @store_lane_i32_with_folded_offset(<4 x i32> %v, i32* %p) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    v128.store32_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i32* %p to i32
   %r = add nuw i32 %q, 24
@@ -651,7 +651,7 @@ define void @store_lane_i32_with_folded_gep_offset(<4 x i32> %v, i32* %p) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    v128.store32_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i32, i32* %p, i32 6
   tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
@@ -666,7 +666,7 @@ define void @store_lane_i32_with_unfolded_gep_negative_offset(<4 x i32> %v, i32*
 ; CHECK-NEXT:    i32.const -24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    v128.store32_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i32, i32* %p, i32 -6
   tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
@@ -681,7 +681,7 @@ define void @store_lane_i32_with_unfolded_offset(<4 x i32> %v, i32* %p) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    v128.store32_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i32* %p to i32
   %r = add nsw i32 %q, 24
@@ -698,7 +698,7 @@ define void @store_lane_i32_with_unfolded_gep_offset(<4 x i32> %v, i32* %p) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    v128.store32_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr i32, i32* %p, i32 6
   tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
@@ -711,7 +711,7 @@ define void @store_lane_i32_to_numeric_address(<4 x i32> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 42
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    v128.store32_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = inttoptr i32 42 to i32*
   tail call void @llvm.wasm.store32.lane(i32* %s, <4 x i32> %v, i32 0)
@@ -724,7 +724,7 @@ define void @store_lane_i32_from_global_address(<4 x i32> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const gv_i32
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store32_lane 0, 0
+; CHECK-NEXT:    v128.store32_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   tail call void @llvm.wasm.store32.lane(i32* @gv_i32, <4 x i32> %v, i32 0)
   ret void
@@ -740,7 +740,7 @@ define <2 x i64> @load_lane_i64_no_offset(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    v128.load64_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %p, <2 x i64> %v, i32 0)
   ret <2 x i64> %t
@@ -754,7 +754,7 @@ define <2 x i64> @load_lane_i64_with_folded_offset(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    v128.load64_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i64* %p to i32
   %r = add nuw i32 %q, 24
@@ -771,7 +771,7 @@ define <2 x i64> @load_lane_i64_with_folded_gep_offset(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:    i32.const 48
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    v128.load64_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i64, i64* %p, i32 6
   %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
@@ -786,7 +786,7 @@ define <2 x i64> @load_lane_i64_with_unfolded_gep_negative_offset(i64* %p, <2 x
 ; CHECK-NEXT:    i32.const -48
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    v128.load64_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i64, i64* %p, i32 -6
   %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
@@ -801,7 +801,7 @@ define <2 x i64> @load_lane_i64_with_unfolded_offset(i64* %p, <2 x i64> %v) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    v128.load64_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i64* %p to i32
   %r = add nsw i32 %q, 24
@@ -818,7 +818,7 @@ define <2 x i64> @load_lane_i64_with_unfolded_gep_offset(i64* %p, <2 x i64> %v)
 ; CHECK-NEXT:    i32.const 48
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 1
-; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    v128.load64_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr i64, i64* %p, i32 6
   %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
@@ -831,7 +831,7 @@ define <2 x i64> @load_lane_i64_from_numeric_address(<2 x i64> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 42
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    v128.load64_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = inttoptr i32 42 to i64*
   %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* %s, <2 x i64> %v, i32 0)
@@ -845,7 +845,7 @@ define <2 x i64> @load_lane_i64_from_global_address(<2 x i64> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const gv_i64
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.load64_lane 0, 0
+; CHECK-NEXT:    v128.load64_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %t = tail call <2 x i64> @llvm.wasm.load64.lane(i64* @gv_i64, <2 x i64> %v, i32 0)
   ret <2 x i64> %t
@@ -857,7 +857,7 @@ define void @store_lane_i64_no_offset(<2 x i64> %v, i64* %p) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 1
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    v128.store64_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   tail call void @llvm.wasm.store64.lane(i64* %p, <2 x i64> %v, i32 0)
   ret void
@@ -871,7 +871,7 @@ define void @store_lane_i64_with_folded_offset(<2 x i64> %v, i64* %p) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    v128.store64_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i64* %p to i32
   %r = add nuw i32 %q, 24
@@ -888,7 +888,7 @@ define void @store_lane_i64_with_folded_gep_offset(<2 x i64> %v, i64* %p) {
 ; CHECK-NEXT:    i32.const 48
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    v128.store64_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i64, i64* %p, i32 6
   tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
@@ -903,7 +903,7 @@ define void @store_lane_i64_with_unfolded_gep_negative_offset(<2 x i64> %v, i64*
 ; CHECK-NEXT:    i32.const -48
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    v128.store64_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i64, i64* %p, i32 -6
   tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
@@ -918,7 +918,7 @@ define void @store_lane_i64_with_unfolded_offset(<2 x i64> %v, i64* %p) {
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    v128.store64_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i64* %p to i32
   %r = add nsw i32 %q, 24
@@ -935,7 +935,7 @@ define void @store_lane_i64_with_unfolded_gep_offset(<2 x i64> %v, i64* %p) {
 ; CHECK-NEXT:    i32.const 48
 ; CHECK-NEXT:    i32.add
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    v128.store64_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr i64, i64* %p, i32 6
   tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
@@ -948,7 +948,7 @@ define void @store_lane_i64_to_numeric_address(<2 x i64> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 42
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    v128.store64_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   %s = inttoptr i32 42 to i64*
   tail call void @llvm.wasm.store64.lane(i64* %s, <2 x i64> %v, i32 0)
@@ -961,7 +961,7 @@ define void @store_lane_i64_from_global_address(<2 x i64> %v) {
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const gv_i64
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.store64_lane 0, 0
+; CHECK-NEXT:    v128.store64_lane 0:p2align=0, 0
 ; CHECK-NEXT:    # fallthrough-return
   tail call void @llvm.wasm.store64.lane(i64* @gv_i64, <2 x i64> %v, i32 0)
   ret void

diff --git a/llvm/test/CodeGen/WebAssembly/simd-load-zero-offset.ll b/llvm/test/CodeGen/WebAssembly/simd-load-zero-offset.ll
index ab3643653deb4..3c4cc58f5082a 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-load-zero-offset.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-load-zero-offset.ll
@@ -18,7 +18,7 @@ define <4 x i32> @load_zero_i32_no_offset(i32* %p) {
 ; CHECK:         .functype load_zero_i32_no_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.load32_zero 0
+; CHECK-NEXT:    v128.load32_zero 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
   %v = tail call <4 x i32> @llvm.wasm.load32.zero(i32* %p)
   ret <4 x i32> %v
@@ -29,7 +29,7 @@ define <4 x i32> @load_zero_i32_with_folded_offset(i32* %p) {
 ; CHECK:         .functype load_zero_i32_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.load32_zero 24
+; CHECK-NEXT:    v128.load32_zero 24:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i32* %p to i32
   %r = add nuw i32 %q, 24
@@ -43,7 +43,7 @@ define <4 x i32> @load_zero_i32_with_folded_gep_offset(i32* %p) {
 ; CHECK:         .functype load_zero_i32_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.load32_zero 24
+; CHECK-NEXT:    v128.load32_zero 24:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i32, i32* %p, i32 6
   %t = tail call <4 x i32> @llvm.wasm.load32.zero(i32* %s)
@@ -57,7 +57,7 @@ define <4 x i32> @load_zero_i32_with_unfolded_gep_negative_offset(i32* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32.const -24
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    v128.load32_zero 0
+; CHECK-NEXT:    v128.load32_zero 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i32, i32* %p, i32 -6
   %t = tail call <4 x i32> @llvm.wasm.load32.zero(i32* %s)
@@ -71,7 +71,7 @@ define <4 x i32> @load_zero_i32_with_unfolded_offset(i32* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    v128.load32_zero 0
+; CHECK-NEXT:    v128.load32_zero 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i32* %p to i32
   %r = add nsw i32 %q, 24
@@ -87,7 +87,7 @@ define <4 x i32> @load_zero_i32_with_unfolded_gep_offset(i32* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    v128.load32_zero 0
+; CHECK-NEXT:    v128.load32_zero 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr i32, i32* %p, i32 6
   %t = tail call <4 x i32> @llvm.wasm.load32.zero(i32* %s)
@@ -99,7 +99,7 @@ define <4 x i32> @load_zero_i32_from_numeric_address() {
 ; CHECK:         .functype load_zero_i32_from_numeric_address () -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    v128.load32_zero 42
+; CHECK-NEXT:    v128.load32_zero 42:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
   %s = inttoptr i32 42 to i32*
   %t = tail call <4 x i32> @llvm.wasm.load32.zero(i32* %s)
@@ -112,7 +112,7 @@ define <4 x i32> @load_zero_i32_from_global_address() {
 ; CHECK:         .functype load_zero_i32_from_global_address () -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    v128.load32_zero gv_i32
+; CHECK-NEXT:    v128.load32_zero gv_i32:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
   %t = tail call <4 x i32> @llvm.wasm.load32.zero(i32* @gv_i32)
   ret <4 x i32> %t
@@ -127,7 +127,7 @@ define <2 x i64> @load_zero_i64_no_offset(i64* %p) {
 ; CHECK:         .functype load_zero_i64_no_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.load64_zero 0
+; CHECK-NEXT:    v128.load64_zero 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
   %v = tail call <2 x i64> @llvm.wasm.load64.zero(i64* %p)
   ret <2 x i64> %v
@@ -138,7 +138,7 @@ define <2 x i64> @load_zero_i64_with_folded_offset(i64* %p) {
 ; CHECK:         .functype load_zero_i64_with_folded_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.load64_zero 24
+; CHECK-NEXT:    v128.load64_zero 24:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i64* %p to i32
   %r = add nuw i32 %q, 24
@@ -152,7 +152,7 @@ define <2 x i64> @load_zero_i64_with_folded_gep_offset(i64* %p) {
 ; CHECK:         .functype load_zero_i64_with_folded_gep_offset (i32) -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    local.get 0
-; CHECK-NEXT:    v128.load64_zero 48
+; CHECK-NEXT:    v128.load64_zero 48:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i64, i64* %p, i64 6
   %t = tail call <2 x i64> @llvm.wasm.load64.zero(i64* %s)
@@ -166,7 +166,7 @@ define <2 x i64> @load_zero_i64_with_unfolded_gep_negative_offset(i64* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32.const -48
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    v128.load64_zero 0
+; CHECK-NEXT:    v128.load64_zero 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr inbounds i64, i64* %p, i64 -6
   %t = tail call <2 x i64> @llvm.wasm.load64.zero(i64* %s)
@@ -180,7 +180,7 @@ define <2 x i64> @load_zero_i64_with_unfolded_offset(i64* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32.const 24
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    v128.load64_zero 0
+; CHECK-NEXT:    v128.load64_zero 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
   %q = ptrtoint i64* %p to i32
   %r = add nsw i32 %q, 24
@@ -196,7 +196,7 @@ define <2 x i64> @load_zero_i64_with_unfolded_gep_offset(i64* %p) {
 ; CHECK-NEXT:    local.get 0
 ; CHECK-NEXT:    i32.const 48
 ; CHECK-NEXT:    i32.add
-; CHECK-NEXT:    v128.load64_zero 0
+; CHECK-NEXT:    v128.load64_zero 0:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
   %s = getelementptr i64, i64* %p, i64 6
   %t = tail call <2 x i64> @llvm.wasm.load64.zero(i64* %s)
@@ -208,7 +208,7 @@ define <2 x i64> @load_zero_i64_from_numeric_address() {
 ; CHECK:         .functype load_zero_i64_from_numeric_address () -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    v128.load64_zero 42
+; CHECK-NEXT:    v128.load64_zero 42:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
   %s = inttoptr i32 42 to i64*
   %t = tail call <2 x i64> @llvm.wasm.load64.zero(i64* %s)
@@ -221,7 +221,7 @@ define <2 x i64> @load_zero_i64_from_global_address() {
 ; CHECK:         .functype load_zero_i64_from_global_address () -> (v128)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    v128.load64_zero gv_i64
+; CHECK-NEXT:    v128.load64_zero gv_i64:p2align=0
 ; CHECK-NEXT:    # fallthrough-return
   %t = tail call <2 x i64> @llvm.wasm.load64.zero(i64* @gv_i64)
   ret <2 x i64> %t
