[llvm] [WebAssembly] Fix lowering of (extending) loads from addrspace(1) globals (PR #155937)

via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 2 21:49:09 PDT 2025


https://github.com/QuantumSegfault updated https://github.com/llvm/llvm-project/pull/155937

>From 1f99e889a20a138e45571fe34cbd11aeaba59829 Mon Sep 17 00:00:00 2001
From: QuantumSegfault <fungi-turbos-7l at icloud.com>
Date: Tue, 2 Sep 2025 20:55:35 -0700
Subject: [PATCH 1/2] Pre-commit tests for fixing (ext)loads from WASM globals

---
 .../WebAssembly/lower-load-wasm-global.ll     | 177 ++++++++++++++++++
 1 file changed, 177 insertions(+)
 create mode 100644 llvm/test/CodeGen/WebAssembly/lower-load-wasm-global.ll

diff --git a/llvm/test/CodeGen/WebAssembly/lower-load-wasm-global.ll b/llvm/test/CodeGen/WebAssembly/lower-load-wasm-global.ll
new file mode 100644
index 0000000000000..3da1ad9e36831
--- /dev/null
+++ b/llvm/test/CodeGen/WebAssembly/lower-load-wasm-global.ll
@@ -0,0 +1,177 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s | FileCheck %s
+
+; Test that various loads from WASM (address space 1) globals lower as intended.
+
+target triple = "wasm32-unknown-unknown"
+
+
+@globalI8 = local_unnamed_addr addrspace(1) global i8 undef
+@globalI32 = local_unnamed_addr addrspace(1) global i32 undef
+@globalI64 = local_unnamed_addr addrspace(1) global i64 undef
+
+
+define i32 @zext_i8_i32() {
+; CHECK-LABEL: zext_i8_i32:
+; CHECK:         .functype zext_i8_i32 () -> (i32)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 0
+; CHECK-NEXT:    i32.load8_u globalI32
+; CHECK-NEXT:    # fallthrough-return
+  %v = load i8, ptr addrspace(1) @globalI32
+  %e = zext i8 %v to i32
+  ret i32 %e
+}
+
+define i32 @sext_i8_i32() {
+; CHECK-LABEL: sext_i8_i32:
+; CHECK:         .functype sext_i8_i32 () -> (i32)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 0
+; CHECK-NEXT:    i32.load8_s globalI32
+; CHECK-NEXT:    # fallthrough-return
+  %v = load i8, ptr addrspace(1) @globalI32
+  %e = sext i8 %v to i32
+  ret i32 %e
+}
+
+define i32 @zext_i16_i32() {
+; CHECK-LABEL: zext_i16_i32:
+; CHECK:         .functype zext_i16_i32 () -> (i32)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 0
+; CHECK-NEXT:    i32.load16_u globalI32
+; CHECK-NEXT:    # fallthrough-return
+  %v = load i16, ptr addrspace(1) @globalI32
+  %e = zext i16 %v to i32
+  ret i32 %e
+}
+
+define i32 @sext_i16_i32() {
+; CHECK-LABEL: sext_i16_i32:
+; CHECK:         .functype sext_i16_i32 () -> (i32)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 0
+; CHECK-NEXT:    i32.load16_s globalI32
+; CHECK-NEXT:    # fallthrough-return
+  %v = load i16, ptr addrspace(1) @globalI32
+  %e = sext i16 %v to i32
+  ret i32 %e
+}
+
+
+define i64 @zext_i8_i64() {
+; CHECK-LABEL: zext_i8_i64:
+; CHECK:         .functype zext_i8_i64 () -> (i64)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 0
+; CHECK-NEXT:    i64.load8_u globalI64
+; CHECK-NEXT:    # fallthrough-return
+  %v = load i8, ptr addrspace(1) @globalI64
+  %e = zext i8 %v to i64
+  ret i64 %e
+}
+
+define i64 @sext_i8_i64() {
+; CHECK-LABEL: sext_i8_i64:
+; CHECK:         .functype sext_i8_i64 () -> (i64)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 0
+; CHECK-NEXT:    i64.load8_s globalI64
+; CHECK-NEXT:    # fallthrough-return
+  %v = load i8, ptr addrspace(1) @globalI64
+  %e = sext i8 %v to i64
+  ret i64 %e
+}
+
+define i64 @zext_i16_i64() {
+; CHECK-LABEL: zext_i16_i64:
+; CHECK:         .functype zext_i16_i64 () -> (i64)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 0
+; CHECK-NEXT:    i64.load16_u globalI64
+; CHECK-NEXT:    # fallthrough-return
+  %v = load i16, ptr addrspace(1) @globalI64
+  %e = zext i16 %v to i64
+  ret i64 %e
+}
+
+define i64 @sext_i16_i64() {
+; CHECK-LABEL: sext_i16_i64:
+; CHECK:         .functype sext_i16_i64 () -> (i64)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 0
+; CHECK-NEXT:    i64.load16_s globalI64
+; CHECK-NEXT:    # fallthrough-return
+  %v = load i16, ptr addrspace(1) @globalI64
+  %e = sext i16 %v to i64
+  ret i64 %e
+}
+
+define i64 @zext_i32_i64() {
+; CHECK-LABEL: zext_i32_i64:
+; CHECK:         .functype zext_i32_i64 () -> (i64)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 0
+; CHECK-NEXT:    i64.load32_u globalI64
+; CHECK-NEXT:    # fallthrough-return
+  %v = load i32, ptr addrspace(1) @globalI64
+  %e = zext i32 %v to i64
+  ret i64 %e
+}
+
+define i64 @sext_i32_i64() {
+; CHECK-LABEL: sext_i32_i64:
+; CHECK:         .functype sext_i32_i64 () -> (i64)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 0
+; CHECK-NEXT:    i64.load32_s globalI64
+; CHECK-NEXT:    # fallthrough-return
+  %v = load i32, ptr addrspace(1) @globalI64
+  %e = sext i32 %v to i64
+  ret i64 %e
+}
+
+
+define i64 @load_i64_from_i32() {
+; CHECK-LABEL: load_i64_from_i32:
+; CHECK:         .functype load_i64_from_i32 () -> (i64)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    global.get globalI32
+; CHECK-NEXT:    # fallthrough-return
+  %v = load i64, ptr addrspace(1) @globalI32
+  ret i64 %v
+}
+
+define i32 @load_i32_from_i64() {
+; CHECK-LABEL: load_i32_from_i64:
+; CHECK:         .functype load_i32_from_i64 () -> (i32)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    global.get globalI64
+; CHECK-NEXT:    # fallthrough-return
+  %v = load i32, ptr addrspace(1) @globalI64
+  ret i32 %v
+}
+
+define i8 @load_i8() {
+; CHECK-LABEL: load_i8:
+; CHECK:         .functype load_i8 () -> (i32)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 0
+; CHECK-NEXT:    i32.load8_u globalI8
+; CHECK-NEXT:    # fallthrough-return
+  %v = load i8, ptr addrspace(1) @globalI8
+  ret i8 %v
+}
+
+define i64 @load_i16_from_i8_zext_to_i64() {
+; CHECK-LABEL: load_i16_from_i8_zext_to_i64:
+; CHECK:         .functype load_i16_from_i8_zext_to_i64 () -> (i64)
+; CHECK-NEXT:  # %bb.0:
+; CHECK-NEXT:    i32.const 0
+; CHECK-NEXT:    i64.load16_u globalI8
+; CHECK-NEXT:    # fallthrough-return
+  %v = load i16, ptr addrspace(1) @globalI8
+  %e = zext i16 %v to i64
+  ret i64 %e
+}

>From a989f21c4bb147bcbe974988dbe78bc134ed76bd Mon Sep 17 00:00:00 2001
From: QuantumSegfault <fungi-turbos-7l at icloud.com>
Date: Thu, 28 Aug 2025 10:43:50 -0700
Subject: [PATCH 2/2] Fix lowering of loads (and extending loads) from
 addrspace(1) globals

---
 .../WebAssembly/WebAssemblyISelLowering.cpp   | 124 +++++++++++++++++-
 .../WebAssembly/lower-load-wasm-global.ll     |  56 ++++----
 2 files changed, 152 insertions(+), 28 deletions(-)

diff --git a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
index 5a45134692865..5a895089c7b5b 100644
--- a/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
+++ b/llvm/lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
@@ -91,6 +91,14 @@ WebAssemblyTargetLowering::WebAssemblyTargetLowering(
     setOperationAction(ISD::LOAD, T, Custom);
     setOperationAction(ISD::STORE, T, Custom);
   }
+
+  // Likewise, mark extending loads (zext/sext/anyext) as Custom so loads
+  // from address space 1 (WASM globals) are lowered correctly.
+  setLoadExtAction({ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD}, MVT::i32,
+                   {MVT::i8, MVT::i16}, Custom);
+  setLoadExtAction({ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD}, MVT::i64,
+                   {MVT::i8, MVT::i16, MVT::i32}, Custom);
+
   if (Subtarget->hasSIMD128()) {
     for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32, MVT::v2i64,
                    MVT::v2f64}) {
@@ -1707,6 +1715,11 @@ static bool IsWebAssemblyGlobal(SDValue Op) {
   if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
     return WebAssembly::isWasmVarAddressSpace(GA->getAddressSpace());
 
+  if (Op->getOpcode() == WebAssemblyISD::Wrapper)
+    if (const GlobalAddressSDNode *GA =
+            dyn_cast<GlobalAddressSDNode>(Op->getOperand(0)))
+      return WebAssembly::isWasmVarAddressSpace(GA->getAddressSpace());
+
   return false;
 }
 
@@ -1764,16 +1777,119 @@ SDValue WebAssemblyTargetLowering::LowerLoad(SDValue Op,
   LoadSDNode *LN = cast<LoadSDNode>(Op.getNode());
   const SDValue &Base = LN->getBasePtr();
   const SDValue &Offset = LN->getOffset();
+  ISD::LoadExtType ExtType = LN->getExtensionType();
+  EVT ResultType = LN->getValueType(0);
 
   if (IsWebAssemblyGlobal(Base)) {
     if (!Offset->isUndef())
       report_fatal_error(
           "unexpected offset when loading from webassembly global", false);
 
-    SDVTList Tys = DAG.getVTList(LN->getValueType(0), MVT::Other);
-    SDValue Ops[] = {LN->getChain(), Base};
-    return DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_GET, DL, Tys, Ops,
-                                   LN->getMemoryVT(), LN->getMemOperand());
+    if (!ResultType.isInteger() && !ResultType.isFloatingPoint()) {
+      SDVTList Tys = DAG.getVTList(ResultType, MVT::Other);
+      SDValue Ops[] = {LN->getChain(), Base};
+      SDValue GlobalGetNode =
+          DAG.getMemIntrinsicNode(WebAssemblyISD::GLOBAL_GET, DL, Tys, Ops,
+                                  LN->getMemoryVT(), LN->getMemOperand());
+      return GlobalGetNode;
+    }
+
+    EVT GT = MVT::INVALID_SIMPLE_VALUE_TYPE;
+
+    if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Base))
+      GT = EVT::getEVT(GA->getGlobal()->getValueType());
+    if (Base->getOpcode() == WebAssemblyISD::Wrapper)
+      if (const GlobalAddressSDNode *GA =
+              dyn_cast<GlobalAddressSDNode>(Base->getOperand(0)))
+        GT = EVT::getEVT(GA->getGlobal()->getValueType());
+
+    if (GT != MVT::i8 && GT != MVT::i16 && GT != MVT::i32 && GT != MVT::i64 &&
+        GT != MVT::f32 && GT != MVT::f64)
+      report_fatal_error("encountered unexpected global type for Base when "
+                         "loading from webassembly global",
+                         false);
+
+    EVT PromotedGT = (GT == MVT::i8 || GT == MVT::i16) ? MVT::i32 : GT;
+
+    if (ExtType == ISD::NON_EXTLOAD) {
+      // A normal, non-extending load may try to load more or fewer bits than
+      // the underlying global holds, which is invalid. Lower it to a load of
+      // the whole global (i32 or i64), then truncate or extend as needed.
+
+      // Modify the MMO to load the full global
+      MachineMemOperand *OldMMO = LN->getMemOperand();
+      MachineMemOperand *NewMMO = DAG.getMachineFunction().getMachineMemOperand(
+          OldMMO->getPointerInfo(), OldMMO->getFlags(),
+          LLT(PromotedGT.getSimpleVT()), OldMMO->getBaseAlign(),
+          OldMMO->getAAInfo(), OldMMO->getRanges(), OldMMO->getSyncScopeID(),
+          OldMMO->getSuccessOrdering(), OldMMO->getFailureOrdering());
+
+      SDVTList Tys = DAG.getVTList(PromotedGT, MVT::Other);
+      SDValue Ops[] = {LN->getChain(), Base};
+      SDValue GlobalGetNode = DAG.getMemIntrinsicNode(
+          WebAssemblyISD::GLOBAL_GET, DL, Tys, Ops, PromotedGT, NewMMO);
+
+      if (ResultType.bitsEq(PromotedGT)) {
+        return GlobalGetNode;
+      }
+
+      SDValue ValRes;
+      if (ResultType.isFloatingPoint())
+        ValRes = DAG.getFPExtendOrRound(GlobalGetNode, DL, ResultType);
+      else
+        ValRes = DAG.getAnyExtOrTrunc(GlobalGetNode, DL, ResultType);
+
+      return DAG.getMergeValues({ValRes, GlobalGetNode.getValue(1)}, DL);
+    }
+
+    if (ExtType == ISD::ZEXTLOAD || ExtType == ISD::SEXTLOAD) {
+      // Turn the unsupported load into an EXTLOAD followed by an explicit
+      // zero/sign extend in-register, mirroring the generic Expand action.
+
+      SDValue Result =
+          DAG.getExtLoad(ISD::EXTLOAD, DL, ResultType, LN->getChain(), Base,
+                         LN->getMemoryVT(), LN->getMemOperand());
+      SDValue ValRes;
+      if (ExtType == ISD::SEXTLOAD)
+        ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Result.getValueType(),
+                             Result, DAG.getValueType(LN->getMemoryVT()));
+      else
+        ValRes = DAG.getZeroExtendInReg(Result, DL, LN->getMemoryVT());
+
+      return DAG.getMergeValues({ValRes, Result.getValue(1)}, DL);
+    }
+
+    if (ExtType == ISD::EXTLOAD) {
+      // Expand the EXTLOAD into a regular LOAD of the whole global and, if
+      // needed, an any-extend to the result type.
+
+      EVT OldLoadType = LN->getMemoryVT();
+      EVT NewLoadType = (OldLoadType == MVT::i8 || OldLoadType == MVT::i16)
+                            ? MVT::i32
+                            : OldLoadType;
+
+      // Modify the MMO to load a whole WASM "register" worth of bits.
+      MachineMemOperand *OldMMO = LN->getMemOperand();
+      MachineMemOperand *NewMMO = DAG.getMachineFunction().getMachineMemOperand(
+          OldMMO->getPointerInfo(), OldMMO->getFlags(),
+          LLT(NewLoadType.getSimpleVT()), OldMMO->getBaseAlign(),
+          OldMMO->getAAInfo(), OldMMO->getRanges(), OldMMO->getSyncScopeID(),
+          OldMMO->getSuccessOrdering(), OldMMO->getFailureOrdering());
+
+      SDValue Result =
+          DAG.getLoad(NewLoadType, DL, LN->getChain(), Base, NewMMO);
+
+      if (NewLoadType != ResultType) {
+        SDValue ValRes = DAG.getNode(ISD::ANY_EXTEND, DL, ResultType, Result);
+        return DAG.getMergeValues({ValRes, Result.getValue(1)}, DL);
+      }
+
+      return Result;
+    }
+
+    report_fatal_error(
+        "encountered unexpected ExtType when loading from webassembly global",
+        false);
   }
 
   if (std::optional<unsigned> Local = IsWebAssemblyLocal(Base, DAG)) {
diff --git a/llvm/test/CodeGen/WebAssembly/lower-load-wasm-global.ll b/llvm/test/CodeGen/WebAssembly/lower-load-wasm-global.ll
index 3da1ad9e36831..0112296df1aa8 100644
--- a/llvm/test/CodeGen/WebAssembly/lower-load-wasm-global.ll
+++ b/llvm/test/CodeGen/WebAssembly/lower-load-wasm-global.ll
@@ -15,8 +15,9 @@ define i32 @zext_i8_i32() {
 ; CHECK-LABEL: zext_i8_i32:
 ; CHECK:         .functype zext_i8_i32 () -> (i32)
 ; CHECK-NEXT:  # %bb.0:
-; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    i32.load8_u globalI32
+; CHECK-NEXT:    global.get globalI32
+; CHECK-NEXT:    i32.const 255
+; CHECK-NEXT:    i32.and
 ; CHECK-NEXT:    # fallthrough-return
   %v = load i8, ptr addrspace(1) @globalI32
   %e = zext i8 %v to i32
@@ -27,8 +28,8 @@ define i32 @sext_i8_i32() {
 ; CHECK-LABEL: sext_i8_i32:
 ; CHECK:         .functype sext_i8_i32 () -> (i32)
 ; CHECK-NEXT:  # %bb.0:
-; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    i32.load8_s globalI32
+; CHECK-NEXT:    global.get globalI32
+; CHECK-NEXT:    i32.extend8_s
 ; CHECK-NEXT:    # fallthrough-return
   %v = load i8, ptr addrspace(1) @globalI32
   %e = sext i8 %v to i32
@@ -39,8 +40,9 @@ define i32 @zext_i16_i32() {
 ; CHECK-LABEL: zext_i16_i32:
 ; CHECK:         .functype zext_i16_i32 () -> (i32)
 ; CHECK-NEXT:  # %bb.0:
-; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    i32.load16_u globalI32
+; CHECK-NEXT:    global.get globalI32
+; CHECK-NEXT:    i32.const 65535
+; CHECK-NEXT:    i32.and
 ; CHECK-NEXT:    # fallthrough-return
   %v = load i16, ptr addrspace(1) @globalI32
   %e = zext i16 %v to i32
@@ -51,8 +53,8 @@ define i32 @sext_i16_i32() {
 ; CHECK-LABEL: sext_i16_i32:
 ; CHECK:         .functype sext_i16_i32 () -> (i32)
 ; CHECK-NEXT:  # %bb.0:
-; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    i32.load16_s globalI32
+; CHECK-NEXT:    global.get globalI32
+; CHECK-NEXT:    i32.extend16_s
 ; CHECK-NEXT:    # fallthrough-return
   %v = load i16, ptr addrspace(1) @globalI32
   %e = sext i16 %v to i32
@@ -64,8 +66,9 @@ define i64 @zext_i8_i64() {
 ; CHECK-LABEL: zext_i8_i64:
 ; CHECK:         .functype zext_i8_i64 () -> (i64)
 ; CHECK-NEXT:  # %bb.0:
-; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    i64.load8_u globalI64
+; CHECK-NEXT:    global.get globalI64
+; CHECK-NEXT:    i64.const 255
+; CHECK-NEXT:    i64.and
 ; CHECK-NEXT:    # fallthrough-return
   %v = load i8, ptr addrspace(1) @globalI64
   %e = zext i8 %v to i64
@@ -76,8 +79,8 @@ define i64 @sext_i8_i64() {
 ; CHECK-LABEL: sext_i8_i64:
 ; CHECK:         .functype sext_i8_i64 () -> (i64)
 ; CHECK-NEXT:  # %bb.0:
-; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    i64.load8_s globalI64
+; CHECK-NEXT:    global.get globalI64
+; CHECK-NEXT:    i64.extend8_s
 ; CHECK-NEXT:    # fallthrough-return
   %v = load i8, ptr addrspace(1) @globalI64
   %e = sext i8 %v to i64
@@ -88,8 +91,9 @@ define i64 @zext_i16_i64() {
 ; CHECK-LABEL: zext_i16_i64:
 ; CHECK:         .functype zext_i16_i64 () -> (i64)
 ; CHECK-NEXT:  # %bb.0:
-; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    i64.load16_u globalI64
+; CHECK-NEXT:    global.get globalI64
+; CHECK-NEXT:    i64.const 65535
+; CHECK-NEXT:    i64.and
 ; CHECK-NEXT:    # fallthrough-return
   %v = load i16, ptr addrspace(1) @globalI64
   %e = zext i16 %v to i64
@@ -100,8 +104,8 @@ define i64 @sext_i16_i64() {
 ; CHECK-LABEL: sext_i16_i64:
 ; CHECK:         .functype sext_i16_i64 () -> (i64)
 ; CHECK-NEXT:  # %bb.0:
-; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    i64.load16_s globalI64
+; CHECK-NEXT:    global.get globalI64
+; CHECK-NEXT:    i64.extend16_s
 ; CHECK-NEXT:    # fallthrough-return
   %v = load i16, ptr addrspace(1) @globalI64
   %e = sext i16 %v to i64
@@ -112,8 +116,9 @@ define i64 @zext_i32_i64() {
 ; CHECK-LABEL: zext_i32_i64:
 ; CHECK:         .functype zext_i32_i64 () -> (i64)
 ; CHECK-NEXT:  # %bb.0:
-; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    i64.load32_u globalI64
+; CHECK-NEXT:    global.get globalI64
+; CHECK-NEXT:    i64.const 4294967295
+; CHECK-NEXT:    i64.and
 ; CHECK-NEXT:    # fallthrough-return
   %v = load i32, ptr addrspace(1) @globalI64
   %e = zext i32 %v to i64
@@ -124,8 +129,8 @@ define i64 @sext_i32_i64() {
 ; CHECK-LABEL: sext_i32_i64:
 ; CHECK:         .functype sext_i32_i64 () -> (i64)
 ; CHECK-NEXT:  # %bb.0:
-; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    i64.load32_s globalI64
+; CHECK-NEXT:    global.get globalI64
+; CHECK-NEXT:    i64.extend32_s
 ; CHECK-NEXT:    # fallthrough-return
   %v = load i32, ptr addrspace(1) @globalI64
   %e = sext i32 %v to i64
@@ -138,6 +143,7 @@ define i64 @load_i64_from_i32() {
 ; CHECK:         .functype load_i64_from_i32 () -> (i64)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    global.get globalI32
+; CHECK-NEXT:    i64.extend_i32_u
 ; CHECK-NEXT:    # fallthrough-return
   %v = load i64, ptr addrspace(1) @globalI32
   ret i64 %v
@@ -148,6 +154,7 @@ define i32 @load_i32_from_i64() {
 ; CHECK:         .functype load_i32_from_i64 () -> (i32)
 ; CHECK-NEXT:  # %bb.0:
 ; CHECK-NEXT:    global.get globalI64
+; CHECK-NEXT:    i32.wrap_i64
 ; CHECK-NEXT:    # fallthrough-return
   %v = load i32, ptr addrspace(1) @globalI64
   ret i32 %v
@@ -157,8 +164,7 @@ define i8 @load_i8() {
 ; CHECK-LABEL: load_i8:
 ; CHECK:         .functype load_i8 () -> (i32)
 ; CHECK-NEXT:  # %bb.0:
-; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    i32.load8_u globalI8
+; CHECK-NEXT:    global.get globalI8
 ; CHECK-NEXT:    # fallthrough-return
   %v = load i8, ptr addrspace(1) @globalI8
   ret i8 %v
@@ -168,8 +174,10 @@ define i64 @load_i16_from_i8_zext_to_i64() {
 ; CHECK-LABEL: load_i16_from_i8_zext_to_i64:
 ; CHECK:         .functype load_i16_from_i8_zext_to_i64 () -> (i64)
 ; CHECK-NEXT:  # %bb.0:
-; CHECK-NEXT:    i32.const 0
-; CHECK-NEXT:    i64.load16_u globalI8
+; CHECK-NEXT:    global.get globalI8
+; CHECK-NEXT:    i64.extend_i32_u
+; CHECK-NEXT:    i64.const 65535
+; CHECK-NEXT:    i64.and
 ; CHECK-NEXT:    # fallthrough-return
   %v = load i16, ptr addrspace(1) @globalI8
   %e = zext i16 %v to i64

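For readers following the zext/sext path in LowerLoad above: the custom lowering expands the load the same way a generic Expand action would, reading the global at its full width and then masking or sign-extending in-register, which is what the i32.and / i32.extend8_s sequences in the updated checks correspond to. Below is a minimal, standalone C++ sketch of that arithmetic; the helper names (zextInRegI8, sextInRegI8) are illustrative only and are not part of the patch.

  #include <cassert>
  #include <cstdint>

  // Zero-extend-in-register: keep the low 8 bits of the full 32-bit value,
  // matching the "i32.const 255 / i32.and" sequence in the updated checks.
  static uint32_t zextInRegI8(uint32_t Full) { return Full & 0xFFu; }

  // Sign-extend-in-register: reinterpret the low 8 bits as signed,
  // matching "i32.extend8_s" (written portably, without narrowing casts).
  static int32_t sextInRegI8(uint32_t Full) {
    uint32_t Low = Full & 0xFFu;
    return Low >= 0x80u ? static_cast<int32_t>(Low) - 0x100
                        : static_cast<int32_t>(Low);
  }

  int main() {
    uint32_t G = 0xFFFFFF80u; // pretend this is what global.get returned
    assert(zextInRegI8(G) == 0x80u);
    assert(sextInRegI8(G) == -128);
    return 0;
  }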

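Similarly, for the non-extending loads whose width does not match the global (load_i64_from_i32 and load_i32_from_i64 above), the value is read at the global's own width and then widened or wrapped. At the DAG level the widening is an any-extend, which instruction selection here ends up emitting as i64.extend_i32_u; the narrowing is a truncate emitted as i32.wrap_i64. A rough sketch of the resulting semantics, again with illustrative helper names rather than patch code:

  #include <cassert>
  #include <cstdint>

  // An i64 load from an i32 global becomes "global.get; i64.extend_i32_u".
  static uint64_t loadI64FromI32Global(uint32_t G32) {
    return static_cast<uint64_t>(G32); // widen to 64 bits
  }

  // An i32 load from an i64 global becomes "global.get; i32.wrap_i64".
  static uint32_t loadI32FromI64Global(uint64_t G64) {
    return static_cast<uint32_t>(G64); // wrap/truncate to the low 32 bits
  }

  int main() {
    assert(loadI64FromI32Global(0x80000000u) == 0x80000000ull);
    assert(loadI32FromI64Global(0x1122334455667788ull) == 0x55667788u);
    return 0;
  }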
