[llvm] [llubi] Add support for bitcast (PR #185205)

Yingwei Zheng via llvm-commits llvm-commits at lists.llvm.org
Sun Mar 8 11:44:02 PDT 2026


https://github.com/dtcxzyw updated https://github.com/llvm/llvm-project/pull/185205

>From 8d9afbb3d1532ecb119f9568db826229753a357f Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Sun, 8 Mar 2026 01:02:02 +0800
Subject: [PATCH 1/4] [llubi] Add support for bitcast

---
 llvm/test/tools/llubi/bitcast_be.ll  | 49 ++++++++++++++++++++++++++++
 llvm/test/tools/llubi/bitcast_le.ll  | 49 ++++++++++++++++++++++++++++
 llvm/tools/llubi/lib/Context.cpp     |  7 ++++
 llvm/tools/llubi/lib/Interpreter.cpp | 11 +++++++
 4 files changed, 116 insertions(+)
 create mode 100644 llvm/test/tools/llubi/bitcast_be.ll
 create mode 100644 llvm/test/tools/llubi/bitcast_le.ll

diff --git a/llvm/test/tools/llubi/bitcast_be.ll b/llvm/test/tools/llubi/bitcast_be.ll
new file mode 100644
index 0000000000000..cdc650bf25196
--- /dev/null
+++ b/llvm/test/tools/llubi/bitcast_be.ll
@@ -0,0 +1,49 @@
+; NOTE: Assertions have been autogenerated by utils/update_llubi_test_checks.py UTC_ARGS: --version 6
+; RUN: llubi --verbose < %s 2>&1 | FileCheck %s
+target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64"
+
+define void @main() {
+entry:
+  %bitcast_int2int = bitcast i32 1 to i32
+  %bitcast_int2int_poison = bitcast i32 poison to i32
+  %bitcast_int2float = bitcast i32 0 to float
+  %bitcast_float2float = bitcast float 2.0 to float
+  %bitcast_float2int = bitcast float 2.0 to i32
+  %bitcast_half2bf16 = bitcast half 1.0 to bfloat
+  %ptr = alloca i32
+  ; FIXME: The provenance is lost.
+  %bitcast_ptr2ptr = bitcast ptr %ptr to ptr
+
+  %bitcast_vec2scalar = bitcast <2 x i32> <i32 0, i32 1> to i64
+  %bitcast_scalar2vec = bitcast i64 1 to <2 x i32>
+  %bitcast_vec2scalar_partial_poison = bitcast <2 x i32> <i32 poison, i32 0> to i64
+  %bitcast_scalar2vec_poison = bitcast i64 poison to <2 x i32>
+
+  %bitcast_vec2vec_up = bitcast <2 x i32> <i32 1, i32 poison> to <4 x i16>
+  %bitcast_vec2vec_down = bitcast <4 x i16> <i16 0, i16 poison, i16 2, i16 3> to <2 x i32>
+  %bitcast_vec2vec_weird = bitcast <8 x i3> <i3 0, i3 1, i3 2, i3 3, i3 4, i3 5, i3 6, i3 7> to <3 x i8>
+
+  %bitcast_intvec2floatvec = bitcast <2 x i32> <i32 1, i32 2> to <4 x half>
+  %bitcast_floatvec2int = bitcast <4 x half> <half 1.0, half 2.0, half 3.0, half 4.0> to i64
+  ret void
+}
+; CHECK: Entering function: main
+; CHECK-NEXT:   %bitcast_int2int = bitcast i32 1 to i32 => i32 1
+; CHECK-NEXT:   %bitcast_int2int_poison = bitcast i32 poison to i32 => poison
+; CHECK-NEXT:   %bitcast_int2float = bitcast i32 0 to float => 0
+; CHECK-NEXT:   %bitcast_float2float = bitcast float 2.000000e+00 to float => 2
+; CHECK-NEXT:   %bitcast_float2int = bitcast float 2.000000e+00 to i32 => i32 1073741824
+; CHECK-NEXT:   %bitcast_half2bf16 = bitcast half 0xH3C00 to bfloat => 0.007813
+; CHECK-NEXT:   %ptr = alloca i32, align 4 => ptr 0x8 [ptr]
+; CHECK-NEXT:   %bitcast_ptr2ptr = bitcast ptr %ptr to ptr => ptr 0x8 [dangling]
+; CHECK-NEXT:   %bitcast_vec2scalar = bitcast <2 x i32> <i32 0, i32 1> to i64 => i64 4294967296
+; CHECK-NEXT:   %bitcast_scalar2vec = bitcast i64 1 to <2 x i32> => { i32 1, i32 0 }
+; CHECK-NEXT:   %bitcast_vec2scalar_partial_poison = bitcast <2 x i32> <i32 poison, i32 0> to i64 => poison
+; CHECK-NEXT:   %bitcast_scalar2vec_poison = bitcast i64 poison to <2 x i32> => { poison, poison }
+; CHECK-NEXT:   %bitcast_vec2vec_up = bitcast <2 x i32> <i32 1, i32 poison> to <4 x i16> => { i16 1, i16 0, poison, poison }
+; CHECK-NEXT:   %bitcast_vec2vec_down = bitcast <4 x i16> <i16 0, i16 poison, i16 2, i16 3> to <2 x i32> => { poison, i32 196610 }
+; CHECK-NEXT:   %bitcast_vec2vec_weird = bitcast <8 x i3> <i3 0, i3 1, i3 2, i3 3, i3 -4, i3 -3, i3 -2, i3 -1> to <3 x i8> => { i8 5, i8 57, i8 119 }
+; CHECK-NEXT:   %bitcast_intvec2floatvec = bitcast <2 x i32> <i32 1, i32 2> to <4 x half> => { 5.9605E-8, 0, 1.1921E-7, 0 }
+; CHECK-NEXT:   %bitcast_floatvec2int = bitcast <4 x half> <half 0xH3C00, half 0xH4000, half 0xH4200, half 0xH4400> to i64 => i64 4899988963420290048
+; CHECK-NEXT:   ret void
+; CHECK-NEXT: Exiting function: main
diff --git a/llvm/test/tools/llubi/bitcast_le.ll b/llvm/test/tools/llubi/bitcast_le.ll
new file mode 100644
index 0000000000000..21173a625e6c2
--- /dev/null
+++ b/llvm/test/tools/llubi/bitcast_le.ll
@@ -0,0 +1,49 @@
+; NOTE: Assertions have been autogenerated by utils/update_llubi_test_checks.py UTC_ARGS: --version 6
+; RUN: llubi --verbose < %s 2>&1 | FileCheck %s
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64"
+
+define void @main() {
+entry:
+  %bitcast_int2int = bitcast i32 1 to i32
+  %bitcast_int2int_poison = bitcast i32 poison to i32
+  %bitcast_int2float = bitcast i32 0 to float
+  %bitcast_float2float = bitcast float 2.0 to float
+  %bitcast_float2int = bitcast float 2.0 to i32
+  %bitcast_half2bf16 = bitcast half 1.0 to bfloat
+  %ptr = alloca i32
+  ; FIXME: The provenance is lost.
+  %bitcast_ptr2ptr = bitcast ptr %ptr to ptr
+
+  %bitcast_vec2scalar = bitcast <2 x i32> <i32 0, i32 1> to i64
+  %bitcast_scalar2vec = bitcast i64 1 to <2 x i32>
+  %bitcast_vec2scalar_partial_poison = bitcast <2 x i32> <i32 poison, i32 0> to i64
+  %bitcast_scalar2vec_poison = bitcast i64 poison to <2 x i32>
+
+  %bitcast_vec2vec_up = bitcast <2 x i32> <i32 1, i32 poison> to <4 x i16>
+  %bitcast_vec2vec_down = bitcast <4 x i16> <i16 0, i16 poison, i16 2, i16 3> to <2 x i32>
+  %bitcast_vec2vec_weird = bitcast <8 x i3> <i3 0, i3 1, i3 2, i3 3, i3 4, i3 5, i3 6, i3 7> to <3 x i8>
+
+  %bitcast_intvec2floatvec = bitcast <2 x i32> <i32 1, i32 2> to <4 x half>
+  %bitcast_floatvec2int = bitcast <4 x half> <half 1.0, half 2.0, half 3.0, half 4.0> to i64
+  ret void
+}
+; CHECK: Entering function: main
+; CHECK-NEXT:   %bitcast_int2int = bitcast i32 1 to i32 => i32 1
+; CHECK-NEXT:   %bitcast_int2int_poison = bitcast i32 poison to i32 => poison
+; CHECK-NEXT:   %bitcast_int2float = bitcast i32 0 to float => 0
+; CHECK-NEXT:   %bitcast_float2float = bitcast float 2.000000e+00 to float => 2
+; CHECK-NEXT:   %bitcast_float2int = bitcast float 2.000000e+00 to i32 => i32 1073741824
+; CHECK-NEXT:   %bitcast_half2bf16 = bitcast half 0xH3C00 to bfloat => 0.007813
+; CHECK-NEXT:   %ptr = alloca i32, align 4 => ptr 0x8 [ptr]
+; CHECK-NEXT:   %bitcast_ptr2ptr = bitcast ptr %ptr to ptr => ptr 0x8 [dangling]
+; CHECK-NEXT:   %bitcast_vec2scalar = bitcast <2 x i32> <i32 0, i32 1> to i64 => i64 4294967296
+; CHECK-NEXT:   %bitcast_scalar2vec = bitcast i64 1 to <2 x i32> => { i32 1, i32 0 }
+; CHECK-NEXT:   %bitcast_vec2scalar_partial_poison = bitcast <2 x i32> <i32 poison, i32 0> to i64 => poison
+; CHECK-NEXT:   %bitcast_scalar2vec_poison = bitcast i64 poison to <2 x i32> => { poison, poison }
+; CHECK-NEXT:   %bitcast_vec2vec_up = bitcast <2 x i32> <i32 1, i32 poison> to <4 x i16> => { i16 1, i16 0, poison, poison }
+; CHECK-NEXT:   %bitcast_vec2vec_down = bitcast <4 x i16> <i16 0, i16 poison, i16 2, i16 3> to <2 x i32> => { poison, i32 196610 }
+; CHECK-NEXT:   %bitcast_vec2vec_weird = bitcast <8 x i3> <i3 0, i3 1, i3 2, i3 3, i3 -4, i3 -3, i3 -2, i3 -1> to <3 x i8> => { i8 -120, i8 -58, i8 -6 }
+; CHECK-NEXT:   %bitcast_intvec2floatvec = bitcast <2 x i32> <i32 1, i32 2> to <4 x half> => { 5.9605E-8, 0, 1.1921E-7, 0 }
+; CHECK-NEXT:   %bitcast_floatvec2int = bitcast <4 x half> <half 0xH3C00, half 0xH4000, half 0xH4200, half 0xH4400> to i64 => i64 4899988963420290048
+; CHECK-NEXT:   ret void
+; CHECK-NEXT: Exiting function: main
diff --git a/llvm/tools/llubi/lib/Context.cpp b/llvm/tools/llubi/lib/Context.cpp
index 0ff685c05643c..e84047b839bfa 100644
--- a/llvm/tools/llubi/lib/Context.cpp
+++ b/llvm/tools/llubi/lib/Context.cpp
@@ -70,6 +70,13 @@ AnyValue Context::getConstantValueImpl(Constant *C) {
     return CI->getValue();
   }
 
+  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
+    if (auto *VecTy = dyn_cast<VectorType>(CFP->getType()))
+      return std::vector<AnyValue>(getEVL(VecTy->getElementCount()),
+                                   AnyValue(CFP->getValue()));
+    return CFP->getValue();
+  }
+
   if (auto *CDS = dyn_cast<ConstantDataSequential>(C)) {
     std::vector<AnyValue> Elts;
     Elts.reserve(CDS->getNumElements());
diff --git a/llvm/tools/llubi/lib/Interpreter.cpp b/llvm/tools/llubi/lib/Interpreter.cpp
index dd5530a355538..948fb3334423a 100644
--- a/llvm/tools/llubi/lib/Interpreter.cpp
+++ b/llvm/tools/llubi/lib/Interpreter.cpp
@@ -1108,6 +1108,17 @@ class InstExecutor : public InstVisitor<InstExecutor, void> {
     setResult(SVI, std::move(Res));
   }
 
+  void visitBitCastInst(BitCastInst &BCI) {
+    // The conversion is done as if the value had been stored to memory and read
+    // back as the target type.
+    SmallVector<Byte> Bytes;
+    Bytes.resize(Ctx.getEffectiveTypeStoreSize(BCI.getType()),
+                 Byte::concrete(0));
+    Ctx.toBytes(getValue(BCI.getOperand(0)), BCI.getOperand(0)->getType(),
+                Bytes);
+    setResult(BCI, Ctx.fromBytes(Bytes, BCI.getType()));
+  }
+
   /// This function implements the main interpreter loop.
   /// It handles function calls in a non-recursive manner to avoid stack
   /// overflows.

>From 2683cf248f8edfc3faf1100ceac5ce36cbfb20b9 Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Sun, 8 Mar 2026 02:22:12 +0800
Subject: [PATCH 2/4] [llubi] Update format of APFloat

---
 llvm/test/tools/llubi/bitcast_be.ll   | 16 ++++++++++------
 llvm/test/tools/llubi/bitcast_le.ll   | 16 ++++++++++------
 llvm/test/tools/llubi/loadstore_be.ll |  2 +-
 llvm/test/tools/llubi/loadstore_le.ll |  2 +-
 llvm/tools/llubi/lib/Value.cpp        | 23 +++++++++++++++++++++--
 5 files changed, 43 insertions(+), 16 deletions(-)

diff --git a/llvm/test/tools/llubi/bitcast_be.ll b/llvm/test/tools/llubi/bitcast_be.ll
index cdc650bf25196..8d59d43d23c23 100644
--- a/llvm/test/tools/llubi/bitcast_be.ll
+++ b/llvm/test/tools/llubi/bitcast_be.ll
@@ -6,8 +6,10 @@ define void @main() {
 entry:
   %bitcast_int2int = bitcast i32 1 to i32
   %bitcast_int2int_poison = bitcast i32 poison to i32
-  %bitcast_int2float = bitcast i32 0 to float
-  %bitcast_float2float = bitcast float 2.0 to float
+  %bitcast_int2float1 = bitcast i32 0 to float
+  %bitcast_int2float2 = bitcast i32 5033160 to float
+  %bitcast_float2float1 = bitcast float 2.0 to float
+  %bitcast_float2float2 = bitcast float 0x3FA9999900000000 to float
   %bitcast_float2int = bitcast float 2.0 to i32
   %bitcast_half2bf16 = bitcast half 1.0 to bfloat
   %ptr = alloca i32
@@ -30,10 +32,12 @@ entry:
 ; CHECK: Entering function: main
 ; CHECK-NEXT:   %bitcast_int2int = bitcast i32 1 to i32 => i32 1
 ; CHECK-NEXT:   %bitcast_int2int_poison = bitcast i32 poison to i32 => poison
-; CHECK-NEXT:   %bitcast_int2float = bitcast i32 0 to float => 0
-; CHECK-NEXT:   %bitcast_float2float = bitcast float 2.000000e+00 to float => 2
+; CHECK-NEXT:   %bitcast_int2float1 = bitcast i32 0 to float => 0.000000e+00
+; CHECK-NEXT:   %bitcast_int2float2 = bitcast i32 5033160 to float => 0x004CCCC8
+; CHECK-NEXT:   %bitcast_float2float1 = bitcast float 2.000000e+00 to float => 2.000000e+00
+; CHECK-NEXT:   %bitcast_float2float2 = bitcast float 0x3FA9999900000000 to float => 0x3D4CCCC8
 ; CHECK-NEXT:   %bitcast_float2int = bitcast float 2.000000e+00 to i32 => i32 1073741824
-; CHECK-NEXT:   %bitcast_half2bf16 = bitcast half 0xH3C00 to bfloat => 0.007813
+; CHECK-NEXT:   %bitcast_half2bf16 = bitcast half 0xH3C00 to bfloat => 7.812500e-03
 ; CHECK-NEXT:   %ptr = alloca i32, align 4 => ptr 0x8 [ptr]
 ; CHECK-NEXT:   %bitcast_ptr2ptr = bitcast ptr %ptr to ptr => ptr 0x8 [dangling]
 ; CHECK-NEXT:   %bitcast_vec2scalar = bitcast <2 x i32> <i32 0, i32 1> to i64 => i64 4294967296
@@ -43,7 +47,7 @@ entry:
 ; CHECK-NEXT:   %bitcast_vec2vec_up = bitcast <2 x i32> <i32 1, i32 poison> to <4 x i16> => { i16 1, i16 0, poison, poison }
 ; CHECK-NEXT:   %bitcast_vec2vec_down = bitcast <4 x i16> <i16 0, i16 poison, i16 2, i16 3> to <2 x i32> => { poison, i32 196610 }
 ; CHECK-NEXT:   %bitcast_vec2vec_weird = bitcast <8 x i3> <i3 0, i3 1, i3 2, i3 3, i3 -4, i3 -3, i3 -2, i3 -1> to <3 x i8> => { i8 5, i8 57, i8 119 }
-; CHECK-NEXT:   %bitcast_intvec2floatvec = bitcast <2 x i32> <i32 1, i32 2> to <4 x half> => { 5.9605E-8, 0, 1.1921E-7, 0 }
+; CHECK-NEXT:   %bitcast_intvec2floatvec = bitcast <2 x i32> <i32 1, i32 2> to <4 x half> => { 5.960460e-08, 0.000000e+00, 1.192090e-07, 0.000000e+00 }
 ; CHECK-NEXT:   %bitcast_floatvec2int = bitcast <4 x half> <half 0xH3C00, half 0xH4000, half 0xH4200, half 0xH4400> to i64 => i64 4899988963420290048
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: Exiting function: main
diff --git a/llvm/test/tools/llubi/bitcast_le.ll b/llvm/test/tools/llubi/bitcast_le.ll
index 21173a625e6c2..c747f9db8c7cd 100644
--- a/llvm/test/tools/llubi/bitcast_le.ll
+++ b/llvm/test/tools/llubi/bitcast_le.ll
@@ -6,8 +6,10 @@ define void @main() {
 entry:
   %bitcast_int2int = bitcast i32 1 to i32
   %bitcast_int2int_poison = bitcast i32 poison to i32
-  %bitcast_int2float = bitcast i32 0 to float
-  %bitcast_float2float = bitcast float 2.0 to float
+  %bitcast_int2float1 = bitcast i32 0 to float
+  %bitcast_int2float2 = bitcast i32 5033160 to float
+  %bitcast_float2float1 = bitcast float 2.0 to float
+  %bitcast_float2float2 = bitcast float 0x3FA9999900000000 to float
   %bitcast_float2int = bitcast float 2.0 to i32
   %bitcast_half2bf16 = bitcast half 1.0 to bfloat
   %ptr = alloca i32
@@ -30,10 +32,12 @@ entry:
 ; CHECK: Entering function: main
 ; CHECK-NEXT:   %bitcast_int2int = bitcast i32 1 to i32 => i32 1
 ; CHECK-NEXT:   %bitcast_int2int_poison = bitcast i32 poison to i32 => poison
-; CHECK-NEXT:   %bitcast_int2float = bitcast i32 0 to float => 0
-; CHECK-NEXT:   %bitcast_float2float = bitcast float 2.000000e+00 to float => 2
+; CHECK-NEXT:   %bitcast_int2float1 = bitcast i32 0 to float => 0.000000e+00
+; CHECK-NEXT:   %bitcast_int2float2 = bitcast i32 5033160 to float => 0x004CCCC8
+; CHECK-NEXT:   %bitcast_float2float1 = bitcast float 2.000000e+00 to float => 2.000000e+00
+; CHECK-NEXT:   %bitcast_float2float2 = bitcast float 0x3FA9999900000000 to float => 0x3D4CCCC8
 ; CHECK-NEXT:   %bitcast_float2int = bitcast float 2.000000e+00 to i32 => i32 1073741824
-; CHECK-NEXT:   %bitcast_half2bf16 = bitcast half 0xH3C00 to bfloat => 0.007813
+; CHECK-NEXT:   %bitcast_half2bf16 = bitcast half 0xH3C00 to bfloat => 7.812500e-03
 ; CHECK-NEXT:   %ptr = alloca i32, align 4 => ptr 0x8 [ptr]
 ; CHECK-NEXT:   %bitcast_ptr2ptr = bitcast ptr %ptr to ptr => ptr 0x8 [dangling]
 ; CHECK-NEXT:   %bitcast_vec2scalar = bitcast <2 x i32> <i32 0, i32 1> to i64 => i64 4294967296
@@ -43,7 +47,7 @@ entry:
 ; CHECK-NEXT:   %bitcast_vec2vec_up = bitcast <2 x i32> <i32 1, i32 poison> to <4 x i16> => { i16 1, i16 0, poison, poison }
 ; CHECK-NEXT:   %bitcast_vec2vec_down = bitcast <4 x i16> <i16 0, i16 poison, i16 2, i16 3> to <2 x i32> => { poison, i32 196610 }
 ; CHECK-NEXT:   %bitcast_vec2vec_weird = bitcast <8 x i3> <i3 0, i3 1, i3 2, i3 3, i3 -4, i3 -3, i3 -2, i3 -1> to <3 x i8> => { i8 -120, i8 -58, i8 -6 }
-; CHECK-NEXT:   %bitcast_intvec2floatvec = bitcast <2 x i32> <i32 1, i32 2> to <4 x half> => { 5.9605E-8, 0, 1.1921E-7, 0 }
+; CHECK-NEXT:   %bitcast_intvec2floatvec = bitcast <2 x i32> <i32 1, i32 2> to <4 x half> => { 5.960460e-08, 0.000000e+00, 1.192090e-07, 0.000000e+00 }
 ; CHECK-NEXT:   %bitcast_floatvec2int = bitcast <4 x half> <half 0xH3C00, half 0xH4000, half 0xH4200, half 0xH4400> to i64 => i64 4899988963420290048
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: Exiting function: main
diff --git a/llvm/test/tools/llubi/loadstore_be.ll b/llvm/test/tools/llubi/loadstore_be.ll
index 3dc03de73f303..60c722c57df26 100644
--- a/llvm/test/tools/llubi/loadstore_be.ll
+++ b/llvm/test/tools/llubi/loadstore_be.ll
@@ -147,7 +147,7 @@ define void @main() {
 ; CHECK-NEXT:   call void @llvm.lifetime.end.p0(ptr %alloc_lifetime)
 ; CHECK-NEXT:   %val16 = load i32, ptr %alloc_lifetime, align 4 => poison
 ; CHECK-NEXT:   store i32 -524288, ptr %alloc, align 4
-; CHECK-NEXT:   %val17 = load float, ptr %alloc, align 4 => NaN
+; CHECK-NEXT:   %val17 = load float, ptr %alloc, align 4 => 0xFFF80000
 ; CHECK-NEXT:   %alloc_vscale = alloca <vscale x 2 x i32>, align 8 => ptr 0x10 [alloc_vscale]
 ; CHECK-NEXT:   %insert = insertelement <vscale x 1 x i32> poison, i32 1, i32 0 => { i32 1, poison, poison, poison }
 ; CHECK-NEXT:   %ones = shufflevector <vscale x 1 x i32> %insert, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer => { i32 1, i32 1, i32 1, i32 1 }
diff --git a/llvm/test/tools/llubi/loadstore_le.ll b/llvm/test/tools/llubi/loadstore_le.ll
index 35e1cd425e72c..e6ac89502ee5d 100644
--- a/llvm/test/tools/llubi/loadstore_le.ll
+++ b/llvm/test/tools/llubi/loadstore_le.ll
@@ -149,7 +149,7 @@ define void @main() {
 ; CHECK-NEXT:   call void @llvm.lifetime.end.p0(ptr %alloc_lifetime)
 ; CHECK-NEXT:   %val16 = load i32, ptr %alloc_lifetime, align 4 => poison
 ; CHECK-NEXT:   store i32 -524288, ptr %alloc, align 4
-; CHECK-NEXT:   %val17 = load float, ptr %alloc, align 4 => NaN
+; CHECK-NEXT:   %val17 = load float, ptr %alloc, align 4 => 0xFFF80000
 ; CHECK-NEXT:   %alloc_vscale = alloca <vscale x 2 x i32>, align 8 => ptr 0x10 [alloc_vscale]
 ; CHECK-NEXT:   %insert = insertelement <vscale x 1 x i32> poison, i32 1, i32 0 => { i32 1, poison, poison, poison }
 ; CHECK-NEXT:   %ones = shufflevector <vscale x 1 x i32> %insert, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer => { i32 1, i32 1, i32 1, i32 1 }
diff --git a/llvm/tools/llubi/lib/Value.cpp b/llvm/tools/llubi/lib/Value.cpp
index 172e810bcb77e..aec3a3542bd1e 100644
--- a/llvm/tools/llubi/lib/Value.cpp
+++ b/llvm/tools/llubi/lib/Value.cpp
@@ -44,9 +44,28 @@ void AnyValue::print(raw_ostream &OS) const {
     }
     OS << "i" << IntVal.getBitWidth() << ' ' << IntVal;
     break;
-  case StorageKind::Float:
-    OS << FloatVal;
+  case StorageKind::Float: {
+    // We cannot reuse Value::print due to lack of LLVMContext here.
+    // Similar to writeAPFloatInternal, output the FP constant value in
+    // exponential notation if it is lossless, otherwise output it in
+    // hexadecimal notation.
+    SmallString<16> StrVal;
+    FloatVal.toString(StrVal, /*FormatPrecision=*/6, /*FormatMaxPadding=*/0,
+                      /*TruncateZero=*/false);
+    if (APFloat(FloatVal.getSemantics(), StrVal).bitwiseIsEqual(FloatVal)) {
+      OS << StrVal;
+    } else {
+      StrVal.clear();
+      APInt Bits = FloatVal.bitcastToAPInt();
+      Bits.toStringUnsigned(StrVal, 16);
+      size_t MaxDigits = divideCeil(Bits.getBitWidth(), 4);
+      OS << "0x";
+      for (size_t Digits = StrVal.size(); Digits != MaxDigits; ++Digits)
+        OS << '0';
+      OS << StrVal;
+    }
     break;
+  }
   case StorageKind::Pointer:
     PtrVal.print(OS);
     break;

>From 481c8eeba4e118579d22618d44047b73919cac9a Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Sun, 8 Mar 2026 02:41:22 +0800
Subject: [PATCH 3/4] [llubi] Add type prefix for float values

---
 llvm/test/tools/llubi/bitcast_be.ll   | 12 ++++++------
 llvm/test/tools/llubi/bitcast_le.ll   | 12 ++++++------
 llvm/test/tools/llubi/loadstore_be.ll |  2 +-
 llvm/test/tools/llubi/loadstore_le.ll |  2 +-
 llvm/tools/llubi/lib/Value.cpp        | 25 +++++++++++++++++++++++++
 5 files changed, 39 insertions(+), 14 deletions(-)

diff --git a/llvm/test/tools/llubi/bitcast_be.ll b/llvm/test/tools/llubi/bitcast_be.ll
index 8d59d43d23c23..9e73411095b17 100644
--- a/llvm/test/tools/llubi/bitcast_be.ll
+++ b/llvm/test/tools/llubi/bitcast_be.ll
@@ -32,12 +32,12 @@ entry:
 ; CHECK: Entering function: main
 ; CHECK-NEXT:   %bitcast_int2int = bitcast i32 1 to i32 => i32 1
 ; CHECK-NEXT:   %bitcast_int2int_poison = bitcast i32 poison to i32 => poison
-; CHECK-NEXT:   %bitcast_int2float1 = bitcast i32 0 to float => 0.000000e+00
-; CHECK-NEXT:   %bitcast_int2float2 = bitcast i32 5033160 to float => 0x004CCCC8
-; CHECK-NEXT:   %bitcast_float2float1 = bitcast float 2.000000e+00 to float => 2.000000e+00
-; CHECK-NEXT:   %bitcast_float2float2 = bitcast float 0x3FA9999900000000 to float => 0x3D4CCCC8
+; CHECK-NEXT:   %bitcast_int2float1 = bitcast i32 0 to float => float 0.000000e+00
+; CHECK-NEXT:   %bitcast_int2float2 = bitcast i32 5033160 to float => float 0x004CCCC8
+; CHECK-NEXT:   %bitcast_float2float1 = bitcast float 2.000000e+00 to float => float 2.000000e+00
+; CHECK-NEXT:   %bitcast_float2float2 = bitcast float 0x3FA9999900000000 to float => float 0x3D4CCCC8
 ; CHECK-NEXT:   %bitcast_float2int = bitcast float 2.000000e+00 to i32 => i32 1073741824
-; CHECK-NEXT:   %bitcast_half2bf16 = bitcast half 0xH3C00 to bfloat => 7.812500e-03
+; CHECK-NEXT:   %bitcast_half2bf16 = bitcast half 0xH3C00 to bfloat => bfloat 7.812500e-03
 ; CHECK-NEXT:   %ptr = alloca i32, align 4 => ptr 0x8 [ptr]
 ; CHECK-NEXT:   %bitcast_ptr2ptr = bitcast ptr %ptr to ptr => ptr 0x8 [dangling]
 ; CHECK-NEXT:   %bitcast_vec2scalar = bitcast <2 x i32> <i32 0, i32 1> to i64 => i64 4294967296
@@ -47,7 +47,7 @@ entry:
 ; CHECK-NEXT:   %bitcast_vec2vec_up = bitcast <2 x i32> <i32 1, i32 poison> to <4 x i16> => { i16 1, i16 0, poison, poison }
 ; CHECK-NEXT:   %bitcast_vec2vec_down = bitcast <4 x i16> <i16 0, i16 poison, i16 2, i16 3> to <2 x i32> => { poison, i32 196610 }
 ; CHECK-NEXT:   %bitcast_vec2vec_weird = bitcast <8 x i3> <i3 0, i3 1, i3 2, i3 3, i3 -4, i3 -3, i3 -2, i3 -1> to <3 x i8> => { i8 5, i8 57, i8 119 }
-; CHECK-NEXT:   %bitcast_intvec2floatvec = bitcast <2 x i32> <i32 1, i32 2> to <4 x half> => { 5.960460e-08, 0.000000e+00, 1.192090e-07, 0.000000e+00 }
+; CHECK-NEXT:   %bitcast_intvec2floatvec = bitcast <2 x i32> <i32 1, i32 2> to <4 x half> => { half 5.960460e-08, half 0.000000e+00, half 1.192090e-07, half 0.000000e+00 }
 ; CHECK-NEXT:   %bitcast_floatvec2int = bitcast <4 x half> <half 0xH3C00, half 0xH4000, half 0xH4200, half 0xH4400> to i64 => i64 4899988963420290048
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: Exiting function: main
diff --git a/llvm/test/tools/llubi/bitcast_le.ll b/llvm/test/tools/llubi/bitcast_le.ll
index c747f9db8c7cd..19e595074a4d5 100644
--- a/llvm/test/tools/llubi/bitcast_le.ll
+++ b/llvm/test/tools/llubi/bitcast_le.ll
@@ -32,12 +32,12 @@ entry:
 ; CHECK: Entering function: main
 ; CHECK-NEXT:   %bitcast_int2int = bitcast i32 1 to i32 => i32 1
 ; CHECK-NEXT:   %bitcast_int2int_poison = bitcast i32 poison to i32 => poison
-; CHECK-NEXT:   %bitcast_int2float1 = bitcast i32 0 to float => 0.000000e+00
-; CHECK-NEXT:   %bitcast_int2float2 = bitcast i32 5033160 to float => 0x004CCCC8
-; CHECK-NEXT:   %bitcast_float2float1 = bitcast float 2.000000e+00 to float => 2.000000e+00
-; CHECK-NEXT:   %bitcast_float2float2 = bitcast float 0x3FA9999900000000 to float => 0x3D4CCCC8
+; CHECK-NEXT:   %bitcast_int2float1 = bitcast i32 0 to float => float 0.000000e+00
+; CHECK-NEXT:   %bitcast_int2float2 = bitcast i32 5033160 to float => float 0x004CCCC8
+; CHECK-NEXT:   %bitcast_float2float1 = bitcast float 2.000000e+00 to float => float 2.000000e+00
+; CHECK-NEXT:   %bitcast_float2float2 = bitcast float 0x3FA9999900000000 to float => float 0x3D4CCCC8
 ; CHECK-NEXT:   %bitcast_float2int = bitcast float 2.000000e+00 to i32 => i32 1073741824
-; CHECK-NEXT:   %bitcast_half2bf16 = bitcast half 0xH3C00 to bfloat => 7.812500e-03
+; CHECK-NEXT:   %bitcast_half2bf16 = bitcast half 0xH3C00 to bfloat => bfloat 7.812500e-03
 ; CHECK-NEXT:   %ptr = alloca i32, align 4 => ptr 0x8 [ptr]
 ; CHECK-NEXT:   %bitcast_ptr2ptr = bitcast ptr %ptr to ptr => ptr 0x8 [dangling]
 ; CHECK-NEXT:   %bitcast_vec2scalar = bitcast <2 x i32> <i32 0, i32 1> to i64 => i64 4294967296
@@ -47,7 +47,7 @@ entry:
 ; CHECK-NEXT:   %bitcast_vec2vec_up = bitcast <2 x i32> <i32 1, i32 poison> to <4 x i16> => { i16 1, i16 0, poison, poison }
 ; CHECK-NEXT:   %bitcast_vec2vec_down = bitcast <4 x i16> <i16 0, i16 poison, i16 2, i16 3> to <2 x i32> => { poison, i32 196610 }
 ; CHECK-NEXT:   %bitcast_vec2vec_weird = bitcast <8 x i3> <i3 0, i3 1, i3 2, i3 3, i3 -4, i3 -3, i3 -2, i3 -1> to <3 x i8> => { i8 -120, i8 -58, i8 -6 }
-; CHECK-NEXT:   %bitcast_intvec2floatvec = bitcast <2 x i32> <i32 1, i32 2> to <4 x half> => { 5.960460e-08, 0.000000e+00, 1.192090e-07, 0.000000e+00 }
+; CHECK-NEXT:   %bitcast_intvec2floatvec = bitcast <2 x i32> <i32 1, i32 2> to <4 x half> => { half 5.960460e-08, half 0.000000e+00, half 1.192090e-07, half 0.000000e+00 }
 ; CHECK-NEXT:   %bitcast_floatvec2int = bitcast <4 x half> <half 0xH3C00, half 0xH4000, half 0xH4200, half 0xH4400> to i64 => i64 4899988963420290048
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: Exiting function: main
diff --git a/llvm/test/tools/llubi/loadstore_be.ll b/llvm/test/tools/llubi/loadstore_be.ll
index 60c722c57df26..237d22253108a 100644
--- a/llvm/test/tools/llubi/loadstore_be.ll
+++ b/llvm/test/tools/llubi/loadstore_be.ll
@@ -147,7 +147,7 @@ define void @main() {
 ; CHECK-NEXT:   call void @llvm.lifetime.end.p0(ptr %alloc_lifetime)
 ; CHECK-NEXT:   %val16 = load i32, ptr %alloc_lifetime, align 4 => poison
 ; CHECK-NEXT:   store i32 -524288, ptr %alloc, align 4
-; CHECK-NEXT:   %val17 = load float, ptr %alloc, align 4 => 0xFFF80000
+; CHECK-NEXT:   %val17 = load float, ptr %alloc, align 4 => float 0xFFF80000
 ; CHECK-NEXT:   %alloc_vscale = alloca <vscale x 2 x i32>, align 8 => ptr 0x10 [alloc_vscale]
 ; CHECK-NEXT:   %insert = insertelement <vscale x 1 x i32> poison, i32 1, i32 0 => { i32 1, poison, poison, poison }
 ; CHECK-NEXT:   %ones = shufflevector <vscale x 1 x i32> %insert, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer => { i32 1, i32 1, i32 1, i32 1 }
diff --git a/llvm/test/tools/llubi/loadstore_le.ll b/llvm/test/tools/llubi/loadstore_le.ll
index e6ac89502ee5d..bdd82ddabbfec 100644
--- a/llvm/test/tools/llubi/loadstore_le.ll
+++ b/llvm/test/tools/llubi/loadstore_le.ll
@@ -149,7 +149,7 @@ define void @main() {
 ; CHECK-NEXT:   call void @llvm.lifetime.end.p0(ptr %alloc_lifetime)
 ; CHECK-NEXT:   %val16 = load i32, ptr %alloc_lifetime, align 4 => poison
 ; CHECK-NEXT:   store i32 -524288, ptr %alloc, align 4
-; CHECK-NEXT:   %val17 = load float, ptr %alloc, align 4 => 0xFFF80000
+; CHECK-NEXT:   %val17 = load float, ptr %alloc, align 4 => float 0xFFF80000
 ; CHECK-NEXT:   %alloc_vscale = alloca <vscale x 2 x i32>, align 8 => ptr 0x10 [alloc_vscale]
 ; CHECK-NEXT:   %insert = insertelement <vscale x 1 x i32> poison, i32 1, i32 0 => { i32 1, poison, poison, poison }
 ; CHECK-NEXT:   %ones = shufflevector <vscale x 1 x i32> %insert, <vscale x 1 x i32> poison, <vscale x 1 x i32> zeroinitializer => { i32 1, i32 1, i32 1, i32 1 }
diff --git a/llvm/tools/llubi/lib/Value.cpp b/llvm/tools/llubi/lib/Value.cpp
index aec3a3542bd1e..f685e86efee20 100644
--- a/llvm/tools/llubi/lib/Value.cpp
+++ b/llvm/tools/llubi/lib/Value.cpp
@@ -45,6 +45,31 @@ void AnyValue::print(raw_ostream &OS) const {
     OS << "i" << IntVal.getBitWidth() << ' ' << IntVal;
     break;
   case StorageKind::Float: {
+    switch (APFloat::SemanticsToEnum(FloatVal.getSemantics())) {
+    default:
+      llvm_unreachable("invalid fltSemantics");
+    case APFloatBase::S_IEEEhalf:
+      OS << "half ";
+      break;
+    case APFloatBase::S_BFloat:
+      OS << "bfloat ";
+      break;
+    case APFloatBase::S_IEEEsingle:
+      OS << "float ";
+      break;
+    case APFloatBase::S_IEEEdouble:
+      OS << "double ";
+      break;
+    case APFloatBase::S_x87DoubleExtended:
+      OS << "x86_fp80 ";
+      break;
+    case APFloatBase::S_IEEEquad:
+      OS << "fp128 ";
+      break;
+    case APFloatBase::S_PPCDoubleDouble:
+      OS << "ppc_fp128 ";
+      break;
+    }
     // We cannot reuse Value::print due to lack of LLVMContext here.
     // Similar to writeAPFloatInternal, output the FP constant value in
     // exponential notation if it is lossless, otherwise output it in

>From 72f2b3605db5a7c832329e039e0c2b9acbdd7c6e Mon Sep 17 00:00:00 2001
From: Yingwei Zheng <dtcxzyw2333 at gmail.com>
Date: Mon, 9 Mar 2026 02:43:33 +0800
Subject: [PATCH 4/4] [llubi] Fix endianness handling

---
 llvm/test/tools/llubi/bitcast_be.ll           |  24 +-
 llvm/test/tools/llubi/bitcast_le.ll           |  18 +-
 llvm/test/tools/llubi/loadstore_be.ll         |  38 +-
 llvm/test/tools/llubi/loadstore_le.ll         |   4 +-
 .../test/tools/llubi/loadstore_overaligned.ll |   6 +-
 llvm/tools/llubi/lib/Context.cpp              | 391 ++++++++----------
 llvm/tools/llubi/lib/Context.h                |  23 +-
 7 files changed, 249 insertions(+), 255 deletions(-)

diff --git a/llvm/test/tools/llubi/bitcast_be.ll b/llvm/test/tools/llubi/bitcast_be.ll
index 9e73411095b17..8e86f3f325251 100644
--- a/llvm/test/tools/llubi/bitcast_be.ll
+++ b/llvm/test/tools/llubi/bitcast_be.ll
@@ -16,13 +16,16 @@ entry:
   ; FIXME: The provenance is lost.
   %bitcast_ptr2ptr = bitcast ptr %ptr to ptr
 
-  %bitcast_vec2scalar = bitcast <2 x i32> <i32 0, i32 1> to i64
-  %bitcast_scalar2vec = bitcast i64 1 to <2 x i32>
+  %bitcast_vec2scalar1 = bitcast <2 x i32> <i32 0, i32 1> to i64
+  %bitcast_vec2scalar2 = bitcast <4 x i4> <i4 1, i4 2, i4 3, i4 5> to i16
+  %bitcast_scalar2vec1 = bitcast i64 1 to <2 x i32>
+  %bitcast_scalar2vec2 = bitcast i16 32768 to <16 x i1>
   %bitcast_vec2scalar_partial_poison = bitcast <2 x i32> <i32 poison, i32 0> to i64
   %bitcast_scalar2vec_poison = bitcast i64 poison to <2 x i32>
 
   %bitcast_vec2vec_up = bitcast <2 x i32> <i32 1, i32 poison> to <4 x i16>
-  %bitcast_vec2vec_down = bitcast <4 x i16> <i16 0, i16 poison, i16 2, i16 3> to <2 x i32>
+  %bitcast_vec2vec_down1 = bitcast <4 x i16> <i16 0, i16 poison, i16 2, i16 3> to <2 x i32>
+  %bitcast_vec2vec_down2 = bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>
   %bitcast_vec2vec_weird = bitcast <8 x i3> <i3 0, i3 1, i3 2, i3 3, i3 4, i3 5, i3 6, i3 7> to <3 x i8>
 
   %bitcast_intvec2floatvec = bitcast <2 x i32> <i32 1, i32 2> to <4 x half>
@@ -40,14 +43,17 @@ entry:
 ; CHECK-NEXT:   %bitcast_half2bf16 = bitcast half 0xH3C00 to bfloat => bfloat 7.812500e-03
 ; CHECK-NEXT:   %ptr = alloca i32, align 4 => ptr 0x8 [ptr]
 ; CHECK-NEXT:   %bitcast_ptr2ptr = bitcast ptr %ptr to ptr => ptr 0x8 [dangling]
-; CHECK-NEXT:   %bitcast_vec2scalar = bitcast <2 x i32> <i32 0, i32 1> to i64 => i64 4294967296
-; CHECK-NEXT:   %bitcast_scalar2vec = bitcast i64 1 to <2 x i32> => { i32 1, i32 0 }
+; CHECK-NEXT:   %bitcast_vec2scalar1 = bitcast <2 x i32> <i32 0, i32 1> to i64 => i64 1
+; CHECK-NEXT:   %bitcast_vec2scalar2 = bitcast <4 x i4> <i4 1, i4 2, i4 3, i4 5> to i16 => i16 4661
+; CHECK-NEXT:   %bitcast_scalar2vec1 = bitcast i64 1 to <2 x i32> => { i32 0, i32 1 }
+; CHECK-NEXT:   %bitcast_scalar2vec2 = bitcast i16 -32768 to <16 x i1> => { T, F, F, F, F, F, F, F, F, F, F, F, F, F, F, F }
 ; CHECK-NEXT:   %bitcast_vec2scalar_partial_poison = bitcast <2 x i32> <i32 poison, i32 0> to i64 => poison
 ; CHECK-NEXT:   %bitcast_scalar2vec_poison = bitcast i64 poison to <2 x i32> => { poison, poison }
-; CHECK-NEXT:   %bitcast_vec2vec_up = bitcast <2 x i32> <i32 1, i32 poison> to <4 x i16> => { i16 1, i16 0, poison, poison }
-; CHECK-NEXT:   %bitcast_vec2vec_down = bitcast <4 x i16> <i16 0, i16 poison, i16 2, i16 3> to <2 x i32> => { poison, i32 196610 }
+; CHECK-NEXT:   %bitcast_vec2vec_up = bitcast <2 x i32> <i32 1, i32 poison> to <4 x i16> => { i16 0, i16 1, poison, poison }
+; CHECK-NEXT:   %bitcast_vec2vec_down1 = bitcast <4 x i16> <i16 0, i16 poison, i16 2, i16 3> to <2 x i32> => { poison, i32 131075 }
+; CHECK-NEXT:   %bitcast_vec2vec_down2 = bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64> => { i64 1, i64 8589934595 }
 ; CHECK-NEXT:   %bitcast_vec2vec_weird = bitcast <8 x i3> <i3 0, i3 1, i3 2, i3 3, i3 -4, i3 -3, i3 -2, i3 -1> to <3 x i8> => { i8 5, i8 57, i8 119 }
-; CHECK-NEXT:   %bitcast_intvec2floatvec = bitcast <2 x i32> <i32 1, i32 2> to <4 x half> => { half 5.960460e-08, half 0.000000e+00, half 1.192090e-07, half 0.000000e+00 }
-; CHECK-NEXT:   %bitcast_floatvec2int = bitcast <4 x half> <half 0xH3C00, half 0xH4000, half 0xH4200, half 0xH4400> to i64 => i64 4899988963420290048
+; CHECK-NEXT:   %bitcast_intvec2floatvec = bitcast <2 x i32> <i32 1, i32 2> to <4 x half> => { half 0.000000e+00, half 5.960460e-08, half 0.000000e+00, half 1.192090e-07 }
+; CHECK-NEXT:   %bitcast_floatvec2int = bitcast <4 x half> <half 0xH3C00, half 0xH4000, half 0xH4200, half 0xH4400> to i64 => i64 4323526012127167488
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: Exiting function: main
diff --git a/llvm/test/tools/llubi/bitcast_le.ll b/llvm/test/tools/llubi/bitcast_le.ll
index 19e595074a4d5..773e10feaac99 100644
--- a/llvm/test/tools/llubi/bitcast_le.ll
+++ b/llvm/test/tools/llubi/bitcast_le.ll
@@ -16,13 +16,16 @@ entry:
   ; FIXME: The provenance is lost.
   %bitcast_ptr2ptr = bitcast ptr %ptr to ptr
 
-  %bitcast_vec2scalar = bitcast <2 x i32> <i32 0, i32 1> to i64
-  %bitcast_scalar2vec = bitcast i64 1 to <2 x i32>
+  %bitcast_vec2scalar1 = bitcast <2 x i32> <i32 0, i32 1> to i64
+  %bitcast_vec2scalar2 = bitcast <4 x i4> <i4 1, i4 2, i4 3, i4 5> to i16
+  %bitcast_scalar2vec1 = bitcast i64 1 to <2 x i32>
+  %bitcast_scalar2vec2 = bitcast i16 32768 to <16 x i1>
   %bitcast_vec2scalar_partial_poison = bitcast <2 x i32> <i32 poison, i32 0> to i64
   %bitcast_scalar2vec_poison = bitcast i64 poison to <2 x i32>
 
   %bitcast_vec2vec_up = bitcast <2 x i32> <i32 1, i32 poison> to <4 x i16>
-  %bitcast_vec2vec_down = bitcast <4 x i16> <i16 0, i16 poison, i16 2, i16 3> to <2 x i32>
+  %bitcast_vec2vec_down1 = bitcast <4 x i16> <i16 0, i16 poison, i16 2, i16 3> to <2 x i32>
+  %bitcast_vec2vec_down2 = bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>
   %bitcast_vec2vec_weird = bitcast <8 x i3> <i3 0, i3 1, i3 2, i3 3, i3 4, i3 5, i3 6, i3 7> to <3 x i8>
 
   %bitcast_intvec2floatvec = bitcast <2 x i32> <i32 1, i32 2> to <4 x half>
@@ -40,12 +43,15 @@ entry:
 ; CHECK-NEXT:   %bitcast_half2bf16 = bitcast half 0xH3C00 to bfloat => bfloat 7.812500e-03
 ; CHECK-NEXT:   %ptr = alloca i32, align 4 => ptr 0x8 [ptr]
 ; CHECK-NEXT:   %bitcast_ptr2ptr = bitcast ptr %ptr to ptr => ptr 0x8 [dangling]
-; CHECK-NEXT:   %bitcast_vec2scalar = bitcast <2 x i32> <i32 0, i32 1> to i64 => i64 4294967296
-; CHECK-NEXT:   %bitcast_scalar2vec = bitcast i64 1 to <2 x i32> => { i32 1, i32 0 }
+; CHECK-NEXT:   %bitcast_vec2scalar1 = bitcast <2 x i32> <i32 0, i32 1> to i64 => i64 4294967296
+; CHECK-NEXT:   %bitcast_vec2scalar2 = bitcast <4 x i4> <i4 1, i4 2, i4 3, i4 5> to i16 => i16 21281
+; CHECK-NEXT:   %bitcast_scalar2vec1 = bitcast i64 1 to <2 x i32> => { i32 1, i32 0 }
+; CHECK-NEXT:   %bitcast_scalar2vec2 = bitcast i16 -32768 to <16 x i1> => { F, F, F, F, F, F, F, F, F, F, F, F, F, F, F, T }
 ; CHECK-NEXT:   %bitcast_vec2scalar_partial_poison = bitcast <2 x i32> <i32 poison, i32 0> to i64 => poison
 ; CHECK-NEXT:   %bitcast_scalar2vec_poison = bitcast i64 poison to <2 x i32> => { poison, poison }
 ; CHECK-NEXT:   %bitcast_vec2vec_up = bitcast <2 x i32> <i32 1, i32 poison> to <4 x i16> => { i16 1, i16 0, poison, poison }
-; CHECK-NEXT:   %bitcast_vec2vec_down = bitcast <4 x i16> <i16 0, i16 poison, i16 2, i16 3> to <2 x i32> => { poison, i32 196610 }
+; CHECK-NEXT:   %bitcast_vec2vec_down1 = bitcast <4 x i16> <i16 0, i16 poison, i16 2, i16 3> to <2 x i32> => { poison, i32 196610 }
+; CHECK-NEXT:   %bitcast_vec2vec_down2 = bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64> => { i64 4294967296, i64 12884901890 }
 ; CHECK-NEXT:   %bitcast_vec2vec_weird = bitcast <8 x i3> <i3 0, i3 1, i3 2, i3 3, i3 -4, i3 -3, i3 -2, i3 -1> to <3 x i8> => { i8 -120, i8 -58, i8 -6 }
 ; CHECK-NEXT:   %bitcast_intvec2floatvec = bitcast <2 x i32> <i32 1, i32 2> to <4 x half> => { half 5.960460e-08, half 0.000000e+00, half 1.192090e-07, half 0.000000e+00 }
 ; CHECK-NEXT:   %bitcast_floatvec2int = bitcast <4 x half> <half 0xH3C00, half 0xH4000, half 0xH4200, half 0xH4400> to i64 => i64 4899988963420290048
diff --git a/llvm/test/tools/llubi/loadstore_be.ll b/llvm/test/tools/llubi/loadstore_be.ll
index 237d22253108a..82b362d87f24f 100644
--- a/llvm/test/tools/llubi/loadstore_be.ll
+++ b/llvm/test/tools/llubi/loadstore_be.ll
@@ -116,19 +116,19 @@ define void @main() {
 ; CHECK-NEXT:   %val2 = load i32, ptr %alloc, align 2 => i32 66051
 ; CHECK-NEXT:   %gep = getelementptr i8, ptr %alloc, i64 1 => ptr 0x9 [alloc + 1]
 ; CHECK-NEXT:   %val3 = load i8, ptr %gep, align 1 => i8 1
-; CHECK-NEXT:   %val4 = load <4 x i8>, ptr %alloc, align 4 => { i8 3, i8 2, i8 1, i8 0 }
+; CHECK-NEXT:   %val4 = load <4 x i8>, ptr %alloc, align 4 => { i8 0, i8 1, i8 2, i8 3 }
 ; CHECK-NEXT:   store i16 1029, ptr %gep, align 1
-; CHECK-NEXT:   %val5 = load <4 x i8>, ptr %alloc, align 4 => { i8 3, i8 5, i8 4, i8 0 }
+; CHECK-NEXT:   %val5 = load <4 x i8>, ptr %alloc, align 4 => { i8 0, i8 4, i8 5, i8 3 }
 ; CHECK-NEXT:   store <2 x i16> <i16 1543, i16 2057>, ptr %alloc, align 4
-; CHECK-NEXT:   %val6 = load <4 x i8>, ptr %alloc, align 4 => { i8 7, i8 6, i8 9, i8 8 }
-; CHECK-NEXT:   %val7 = load <8 x i4>, ptr %alloc, align 4 => { i4 0, i4 7, i4 0, i4 6, i4 0, i4 -7, i4 0, i4 -8 }
+; CHECK-NEXT:   %val6 = load <4 x i8>, ptr %alloc, align 4 => { i8 6, i8 7, i8 8, i8 9 }
+; CHECK-NEXT:   %val7 = load <8 x i4>, ptr %alloc, align 4 => { i4 0, i4 6, i4 0, i4 7, i4 0, i4 -8, i4 0, i4 -7 }
 ; CHECK-NEXT:   store <3 x i3> <i3 1, i3 2, i3 3>, ptr %alloc, align 2
 ; CHECK-NEXT:   %val8 = load <16 x i1>, ptr %alloc, align 2 => { F, F, F, F, F, F, F, F, F, T, F, T, F, F, T, T }
 ; CHECK-NEXT:   %val9 = load <16 x i1>, ptr %alloc, align 2 => { F, F, F, F, F, F, F, F, F, T, F, T, F, F, T, T }
 ; CHECK-NEXT:   store <8 x i3> <i3 0, i3 1, i3 2, i3 3, i3 -4, i3 -3, i3 -2, i3 -1>, ptr %alloc, align 4
 ; CHECK-NEXT:   %val_bitcast = load <3 x i8>, ptr %alloc, align 4 => { i8 5, i8 57, i8 119 }
 ; CHECK-NEXT:   store i25 -1, ptr %alloc, align 4
-; CHECK-NEXT:   %val10 = load <4 x i8>, ptr %alloc, align 4 => { i8 -1, i8 -1, i8 -1, i8 1 }
+; CHECK-NEXT:   %val10 = load <4 x i8>, ptr %alloc, align 4 => { i8 1, i8 -1, i8 -1, i8 -1 }
 ; CHECK-NEXT:   store i8 -1, ptr %alloc, align 1
 ; CHECK-NEXT:   %val11 = load i25, ptr %alloc, align 4 => poison
 ; CHECK-NEXT:   call void @llvm.lifetime.start.p0(ptr poison)
@@ -155,36 +155,36 @@ define void @main() {
 ; CHECK-NEXT:   store <vscale x 1 x i32> %ones, ptr %alloc_vscale, align 4
 ; CHECK-NEXT:   %gep3 = getelementptr <vscale x 1 x i32>, ptr %alloc_vscale, i64 1 => ptr 0x20 [alloc_vscale + 16]
 ; CHECK-NEXT:   store <vscale x 1 x i32> %twos, ptr %gep3, align 4
-; CHECK-NEXT:   %val18 = load <vscale x 2 x i32>, ptr %alloc_vscale, align 8 => { i32 2, i32 2, i32 2, i32 2, i32 1, i32 1, i32 1, i32 1 }
+; CHECK-NEXT:   %val18 = load <vscale x 2 x i32>, ptr %alloc_vscale, align 8 => { i32 1, i32 1, i32 1, i32 1, i32 2, i32 2, i32 2, i32 2 }
 ; CHECK-NEXT:   %alloc_struct = alloca %struct, align 8 => ptr 0x30 [alloc_struct]
 ; CHECK-NEXT:   store %struct { [2 x i16] [i16 1, i16 2], i64 3 }, ptr %alloc_struct, align 8
-; CHECK-NEXT:   %val19 = load %struct, ptr %alloc_struct, align 8 => { { i16 1, i16 2 }, i64 3 }
-; CHECK-NEXT:   %val20 = load i64, ptr %alloc_struct, align 8 => i64 281486191577587
-; CHECK-NEXT:   %val21 = load i64, ptr %alloc_struct, align 8 => i64 281486962127119
+; CHECK-NEXT:   %val19 = load %struct, ptr %alloc_struct, align 8 => { { i16 3, i16 3 }, i64 3 }
+; CHECK-NEXT:   %val20 = load i64, ptr %alloc_struct, align 8 => i64 -3863260480978728461
+; CHECK-NEXT:   %val21 = load i64, ptr %alloc_struct, align 8 => i64 -7514139571062690004
 ; CHECK-NEXT:   %alloc_struct_packed = alloca %struct.packed, align 8 => ptr 0x40 [alloc_struct_packed]
 ; CHECK-NEXT:   store %struct.packed <{ [2 x i16] [i16 1, i16 2], i64 3 }>, ptr %alloc_struct_packed, align 1
-; CHECK-NEXT:   %val22 = load %struct.packed, ptr %alloc_struct_packed, align 1 => { { i16 1, i16 2 }, i64 3 }
-; CHECK-NEXT:   %val23 = load i64, ptr %alloc_struct_packed, align 8 => i64 281483566645248
-; CHECK-NEXT:   %val24 = load i64, ptr %alloc_struct_packed, align 8 => i64 281483566645248
+; CHECK-NEXT:   %val22 = load %struct.packed, ptr %alloc_struct_packed, align 1 => { { i16 3, i16 3 }, i64 3 }
+; CHECK-NEXT:   %val23 = load i64, ptr %alloc_struct_packed, align 8 => i64 5694717663415107584
+; CHECK-NEXT:   %val24 = load i64, ptr %alloc_struct_packed, align 8 => i64 -1143453889152942080
 ; CHECK-NEXT:   %alloc_struct_vscale = alloca %struct.vscale, align 8 => ptr 0x50 [alloc_struct_vscale]
 ; CHECK-NEXT:   store %struct.vscale zeroinitializer, ptr %alloc_struct_vscale, align 4
 ; CHECK-NEXT:   %gep4 = getelementptr <vscale x 1 x i32>, ptr %alloc_struct_vscale, i32 1 => ptr 0x60 [alloc_struct_vscale + 16]
 ; CHECK-NEXT:   store <vscale x 1 x i32> %ones, ptr %gep4, align 4
-; CHECK-NEXT:   %val25 = load %struct.vscale, ptr %alloc_struct_vscale, align 4 => { { i32 0, i32 0, i32 0, i32 0 }, { i32 1, i32 1, i32 1, i32 1 } }
+; CHECK-NEXT:   %val25 = load %struct.vscale, ptr %alloc_struct_vscale, align 4 => { { i32 1, i32 1, i32 1, i32 1 }, { i32 1, i32 1, i32 1, i32 1 } }
 ; CHECK-NEXT:   %alloc_array = alloca [2 x i32], align 4 => ptr 0x70 [alloc_array]
 ; CHECK-NEXT:   store [2 x i32] [i32 1, i32 2], ptr %alloc_array, align 4
-; CHECK-NEXT:   %val26 = load [2 x i32], ptr %alloc_array, align 4 => { i32 1, i32 2 }
+; CHECK-NEXT:   %val26 = load [2 x i32], ptr %alloc_array, align 4 => { i32 2, i32 2 }
 ; CHECK-NEXT:   %alloc_i1_vec = alloca <4 x i1>, align 1 => ptr 0x78 [alloc_i1_vec]
 ; CHECK-NEXT:   store <4 x i1> <i1 true, i1 false, i1 poison, i1 false>, ptr %alloc_i1_vec, align 1
-; CHECK-NEXT:   %val27 = load <4 x i1>, ptr %alloc_i1_vec, align 1 => { F, F, F, F }
+; CHECK-NEXT:   %val27 = load <4 x i1>, ptr %alloc_i1_vec, align 1 => { T, F, poison, F }
 ; CHECK-NEXT:   %val28 = load i8, ptr %alloc_i1_vec, align 1 => poison
 ; CHECK-NEXT:   %alloc_padding = alloca i31, align 4 => ptr 0x7C [alloc_padding]
 ; CHECK-NEXT:   store i32 0, ptr %alloc_padding, align 4
 ; CHECK-NEXT:   %alloc_padding_vec = alloca i64, align 8 => ptr 0x80 [alloc_padding_vec]
 ; CHECK-NEXT:   store { <6 x i5>, i32 } { <6 x i5> zeroinitializer, i32 -1 }, ptr %alloc_padding_vec, align 4
-; CHECK-NEXT:   %load_agg = load { <6 x i5>, i32 }, ptr %alloc_padding_vec, align 4 => { { i5 0, i5 0, i5 0, i5 0, i5 0, i5 0 }, i32 -1 }
-; CHECK-NEXT:   %load_vec = load <6 x i5>, ptr %alloc_padding_vec, align 4 => { i5 0, i5 0, i5 0, i5 0, i5 0, i5 0 }
-; CHECK-NEXT:   %load_int_non_zero_padding = load i33, ptr %alloc_padding_vec, align 8 => i33 255
-; CHECK-NEXT:   %load_vec_non_zero_padding = load <3 x i11>, ptr %alloc_padding_vec, align 8 => { i11 255, i11 0, i11 0 }
+; CHECK-NEXT:   %load_agg = load { <6 x i5>, i32 }, ptr %alloc_padding_vec, align 4 => { { i5 -1, i5 -1, i5 -1, i5 -1, i5 -1, i5 -1 }, i32 -1 }
+; CHECK-NEXT:   %load_vec = load <6 x i5>, ptr %alloc_padding_vec, align 4 => { i5 -1, i5 9, i5 -16, i5 -7, i5 -2, i5 9 }
+; CHECK-NEXT:   %load_int_non_zero_padding = load i33, ptr %alloc_padding_vec, align 8 => poison
+; CHECK-NEXT:   %load_vec_non_zero_padding = load <3 x i11>, ptr %alloc_padding_vec, align 8 => { poison, poison, poison }
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: Exiting function: main
diff --git a/llvm/test/tools/llubi/loadstore_le.ll b/llvm/test/tools/llubi/loadstore_le.ll
index bdd82ddabbfec..5b7bce3de5aa2 100644
--- a/llvm/test/tools/llubi/loadstore_le.ll
+++ b/llvm/test/tools/llubi/loadstore_le.ll
@@ -184,8 +184,8 @@ define void @main() {
 ; CHECK-NEXT:   store i32 0, ptr %alloc_padding, align 4
 ; CHECK-NEXT:   %alloc_padding_vec = alloca i64, align 8 => ptr 0x80 [alloc_padding_vec]
 ; CHECK-NEXT:   store { <6 x i5>, i32 } { <6 x i5> zeroinitializer, i32 -1 }, ptr %alloc_padding_vec, align 4
-; CHECK-NEXT:   %load_agg = load { <6 x i5>, i32 }, ptr %alloc_padding_vec, align 4 => { { i5 0, i5 0, i5 0, i5 0, i5 0, i5 0 }, i32 -1 }
-; CHECK-NEXT:   %load_vec = load <6 x i5>, ptr %alloc_padding_vec, align 4 => { i5 0, i5 0, i5 0, i5 0, i5 0, i5 0 }
+; CHECK-NEXT:   %load_agg = load { <6 x i5>, i32 }, ptr %alloc_padding_vec, align 4 => { { poison, poison, poison, poison, poison, poison }, i32 -1 }
+; CHECK-NEXT:   %load_vec = load <6 x i5>, ptr %alloc_padding_vec, align 4 => { poison, poison, poison, poison, poison, poison }
 ; CHECK-NEXT:   %load_int_non_zero_padding = load i33, ptr %alloc_padding_vec, align 8 => poison
 ; CHECK-NEXT:   %load_vec_non_zero_padding = load <3 x i11>, ptr %alloc_padding_vec, align 8 => { poison, poison, poison }
 ; CHECK-NEXT:   ret void
diff --git a/llvm/test/tools/llubi/loadstore_overaligned.ll b/llvm/test/tools/llubi/loadstore_overaligned.ll
index 5933ec4399fbb..664704ef2ff34 100644
--- a/llvm/test/tools/llubi/loadstore_overaligned.ll
+++ b/llvm/test/tools/llubi/loadstore_overaligned.ll
@@ -22,9 +22,9 @@ define void @main() {
 ; CHECK-NEXT:   %alloca = alloca [2 x i32], align 8 => ptr 0x8 [alloca]
 ; CHECK-NEXT:   store <4 x i32> zeroinitializer, ptr %alloca, align 8
 ; CHECK-NEXT:   store [2 x i32] [i32 1, i32 1], ptr %alloca, align 8
-; CHECK-NEXT:   %load1 = load <4 x i32>, ptr %alloca, align 8 => { i32 1, i32 -289830082, i32 1, i32 0 }
-; CHECK-NEXT:   %load2 = load <4 x i32>, ptr %alloca, align 8 => { i32 1, i32 -399511892, i32 1, i32 0 }
-; CHECK-NEXT:   %load3 = load <4 x i32>, ptr %alloca, align 8 => { i32 1, i32 -132966042, i32 1, i32 0 }
+; CHECK-NEXT:   %load1 = load <4 x i32>, ptr %alloca, align 8 => { i32 1, i32 -289830082, i32 1, i32 -399511892 }
+; CHECK-NEXT:   %load2 = load <4 x i32>, ptr %alloca, align 8 => { i32 1, i32 -132966042, i32 1, i32 1094190734 }
+; CHECK-NEXT:   %load3 = load <4 x i32>, ptr %alloca, align 8 => { i32 1, i32 -1670034957, i32 1, i32 -899485425 }
 ; CHECK-NEXT:   %load_arr = load [2 x i32], ptr %alloca, align 8 => { i32 1, i32 1 }
 ; CHECK-NEXT:   ret void
 ; CHECK-NEXT: Exiting function: main
diff --git a/llvm/tools/llubi/lib/Context.cpp b/llvm/tools/llubi/lib/Context.cpp
index e84047b839bfa..dd08f24485b8d 100644
--- a/llvm/tools/llubi/lib/Context.cpp
+++ b/llvm/tools/llubi/lib/Context.cpp
@@ -110,283 +110,248 @@ const AnyValue &Context::getConstantValue(Constant *C) {
   return ConstCache.emplace(C, getConstantValueImpl(C)).first->second;
 }
 
-AnyValue Context::fromBytes(ArrayRef<Byte> Bytes, Type *Ty,
-                            uint32_t &OffsetInBits, bool CheckPaddingBits) {
-  if (Ty->isIntegerTy() || Ty->isFloatingPointTy() || Ty->isPointerTy()) {
-    uint32_t NumBits = DL.getTypeSizeInBits(Ty).getFixedValue();
-    uint32_t NewOffsetInBits = OffsetInBits + NumBits;
-    if (CheckPaddingBits)
-      NewOffsetInBits = alignTo(NewOffsetInBits, 8);
-    bool NeedsPadding = NewOffsetInBits != OffsetInBits + NumBits;
-    uint32_t NumBitsToExtract = NewOffsetInBits - OffsetInBits;
-    SmallVector<uint64_t> RawBits(alignTo(NumBitsToExtract, 8));
-    for (uint32_t I = 0; I < NumBitsToExtract; I += 8) {
-      // Try to form a 'logical' byte that represents the bits in the range
-      // [BitsStart, BitsEnd].
-      uint32_t NumBitsInByte = std::min(8U, NumBitsToExtract - I);
-      uint32_t BitsStart =
-          OffsetInBits +
-          (DL.isLittleEndian() ? I : (NumBitsToExtract - NumBitsInByte - I));
-      uint32_t BitsEnd = BitsStart + NumBitsInByte - 1;
-      Byte LogicalByte;
-      // Check whether it is a cross-byte access.
-      if (((BitsStart ^ BitsEnd) & ~7) == 0)
-        LogicalByte = Bytes[BitsStart / 8].lshr(BitsStart % 8);
-      else
-        LogicalByte =
-            Byte::fshr(Bytes[BitsStart / 8], Bytes[BitsEnd / 8], BitsStart % 8);
-
-      uint32_t Mask = (1U << NumBitsInByte) - 1;
-      // If any of the bits in the byte is poison, the whole value is poison.
-      if (~LogicalByte.ConcreteMask & ~LogicalByte.Value & Mask) {
-        OffsetInBits = NewOffsetInBits;
-        return AnyValue::poison();
-      }
-      uint8_t RandomBits = 0;
-      if (UndefBehavior == UndefValueBehavior::NonDeterministic &&
-          (~LogicalByte.ConcreteMask & Mask)) {
-        // This byte contains undef bits.
-        // We don't use std::uniform_int_distribution here because it produces
-        // different results across different library implementations. Instead,
-        // we directly use the low bits from Rng.
-        RandomBits = static_cast<uint8_t>(Rng());
-      }
-      uint8_t ActualBits = ((LogicalByte.Value & LogicalByte.ConcreteMask) |
-                            (RandomBits & ~LogicalByte.ConcreteMask)) &
-                           Mask;
-      RawBits[I / 64] |= static_cast<APInt::WordType>(ActualBits) << (I % 64);
+AnyValue Context::fromBytes(ConstBytesView Bytes, Type *Ty,
+                            uint32_t OffsetInBits, bool CheckPaddingBits) {
+  uint32_t NumBits = DL.getTypeSizeInBits(Ty).getFixedValue();
+  uint32_t NewOffsetInBits = OffsetInBits + NumBits;
+  if (CheckPaddingBits)
+    NewOffsetInBits = alignTo(NewOffsetInBits, 8);
+  bool NeedsPadding = NewOffsetInBits != OffsetInBits + NumBits;
+  uint32_t NumBitsToExtract = NewOffsetInBits - OffsetInBits;
+  SmallVector<uint64_t> RawBits(alignTo(NumBitsToExtract, 8));
+  for (uint32_t I = 0; I < NumBitsToExtract; I += 8) {
+    // Try to form a 'logical' byte that represents the bits in the range
+    // [BitsStart, BitsEnd].
+    uint32_t NumBitsInByte = std::min(8U, NumBitsToExtract - I);
+    uint32_t BitsStart = OffsetInBits + I;
+    uint32_t BitsEnd = BitsStart + NumBitsInByte - 1;
+    Byte LogicalByte;
+    // Check whether it is a cross-byte access.
+    if (((BitsStart ^ BitsEnd) & ~7) == 0)
+      LogicalByte = Bytes[BitsStart / 8].lshr(BitsStart % 8);
+    else
+      LogicalByte =
+          Byte::fshr(Bytes[BitsStart / 8], Bytes[BitsEnd / 8], BitsStart % 8);
+
+    uint32_t Mask = (1U << NumBitsInByte) - 1;
+    // If any of the bits in the byte is poison, the whole value is poison.
+    if (~LogicalByte.ConcreteMask & ~LogicalByte.Value & Mask) {
+      OffsetInBits = NewOffsetInBits;
+      return AnyValue::poison();
     }
-    OffsetInBits = NewOffsetInBits;
-
-    APInt Bits(NumBitsToExtract, RawBits);
-
-    // Padding bits for non-byte-sized scalar types must be zero.
-    if (NeedsPadding) {
-      if (!Bits.isIntN(NumBits))
-        return AnyValue::poison();
-      Bits = Bits.trunc(NumBits);
+    uint8_t RandomBits = 0;
+    if (UndefBehavior == UndefValueBehavior::NonDeterministic &&
+        (~LogicalByte.ConcreteMask & Mask)) {
+      // This byte contains undef bits.
+      // We don't use std::uniform_int_distribution here because it produces
+      // different results across different library implementations. Instead,
+      // we directly use the low bits from Rng.
+      RandomBits = static_cast<uint8_t>(Rng());
     }
+    uint8_t ActualBits = ((LogicalByte.Value & LogicalByte.ConcreteMask) |
+                          (RandomBits & ~LogicalByte.ConcreteMask)) &
+                         Mask;
+    RawBits[I / 64] |= static_cast<APInt::WordType>(ActualBits) << (I % 64);
+  }
+  OffsetInBits = NewOffsetInBits;
+
+  APInt Bits(NumBitsToExtract, RawBits);
 
-    if (Ty->isIntegerTy())
-      return Bits;
-    if (Ty->isFloatingPointTy())
-      return APFloat(Ty->getFltSemantics(), Bits);
-    assert(Ty->isPointerTy() && "Expect a pointer type");
-    // TODO: recover provenance
-    return Pointer(Bits);
+  // Padding bits for non-byte-sized scalar types must be zero.
+  if (NeedsPadding) {
+    if (!Bits.isIntN(NumBits))
+      return AnyValue::poison();
+    Bits = Bits.trunc(NumBits);
   }
 
-  assert(OffsetInBits % 8 == 0 && "Missing padding bits.");
+  if (Ty->isIntegerTy())
+    return Bits;
+  if (Ty->isFloatingPointTy())
+    return APFloat(Ty->getFltSemantics(), Bits);
+  assert(Ty->isPointerTy() && "Expect a pointer type");
+  // TODO: recover provenance
+  return Pointer(Bits);
+}
+
+AnyValue Context::fromBytes(ArrayRef<Byte> Bytes, Type *Ty) {
+  if (Ty->isIntegerTy() || Ty->isFloatingPointTy() || Ty->isPointerTy())
+    return fromBytes(ConstBytesView(Bytes, DL), Ty, /*OffsetInBits=*/0,
+                     /*CheckPaddingBits=*/true);
+
   if (auto *VecTy = dyn_cast<VectorType>(Ty)) {
     Type *ElemTy = VecTy->getElementType();
     uint32_t ElemBits = DL.getTypeSizeInBits(ElemTy).getFixedValue();
     uint32_t NumElements = getEVL(VecTy->getElementCount());
     // Check padding bits. <N x iM> acts as if an integer type with N * M bits.
-    uint32_t NewOffsetInBits = OffsetInBits + ElemBits * NumElements;
-    uint32_t AlignedNewOffsetInBits = alignTo(NewOffsetInBits, 8);
-    if (NewOffsetInBits != AlignedNewOffsetInBits) {
-      assert(NewOffsetInBits % 8 != 0 &&
-             AlignedNewOffsetInBits - NewOffsetInBits < 8 &&
-             "Unexpected offset.");
+    uint32_t VecBits = ElemBits * NumElements;
+    uint32_t AlignedVecBits = alignTo(VecBits, 8);
+    ConstBytesView View(Bytes, DL);
+    if (VecBits != AlignedVecBits) {
       // The padding bits are located in the last byte on little-endian systems.
       // On big-endian systems, the padding bits are located in the first byte.
-      const Byte &PaddingByte =
-          Bytes[(DL.isBigEndian() ? OffsetInBits : NewOffsetInBits) / 8];
-      uint32_t Mask = (~0U << (NewOffsetInBits % 8)) & 255U;
+      const Byte &PaddingByte = View[Bytes.size() - 1];
+      uint32_t Mask = (~0U << (VecBits % 8)) & 255U;
       // Make sure all high padding bits are zero.
-      if ((PaddingByte.ConcreteMask & ~PaddingByte.Value & Mask) != Mask) {
-        OffsetInBits = AlignedNewOffsetInBits;
+      if ((PaddingByte.ConcreteMask & ~PaddingByte.Value & Mask) != Mask)
         return AnyValue::getPoisonValue(*this, Ty);
-      }
-      if (DL.isBigEndian())
-        OffsetInBits += AlignedNewOffsetInBits - NewOffsetInBits;
     }
 
     std::vector<AnyValue> ValVec;
     ValVec.reserve(NumElements);
+    // For little endian element zero is put in the least significant bits of
+    // the integer, and for big endian element zero is put in the most
+    // significant bits.
     for (uint32_t I = 0; I != NumElements; ++I)
-      ValVec.push_back(
-          fromBytes(Bytes, ElemTy, OffsetInBits, /*CheckPaddingBits=*/false));
-    if (DL.isBigEndian())
-      std::reverse(ValVec.begin(), ValVec.end());
-    OffsetInBits = AlignedNewOffsetInBits;
+      ValVec.push_back(fromBytes(View, ElemTy,
+                                 DL.isLittleEndian()
+                                     ? I * ElemBits
+                                     : VecBits - ElemBits - I * ElemBits,
+                                 /*CheckPaddingBits=*/false));
     return AnyValue(std::move(ValVec));
   }
   if (auto *ArrTy = dyn_cast<ArrayType>(Ty)) {
     Type *ElemTy = ArrTy->getElementType();
-    uint32_t StrideInBits = getEffectiveTypeAllocSize(ElemTy) * 8;
-    std::vector<AnyValue> ValVec;
+    uint64_t Stride = getEffectiveTypeAllocSize(ElemTy);
     uint32_t NumElements = ArrTy->getNumElements();
+    std::vector<AnyValue> ValVec;
     ValVec.reserve(NumElements);
-    uint32_t BaseOffsetInBits = OffsetInBits;
-    for (uint32_t I = 0; I != NumElements; ++I) {
-      OffsetInBits = BaseOffsetInBits + I * StrideInBits;
-      ValVec.push_back(
-          fromBytes(Bytes, ElemTy, OffsetInBits, /*CheckPaddingBits=*/true));
-    }
+    for (uint32_t I = 0; I != NumElements; ++I)
+      ValVec.push_back(fromBytes(Bytes.slice(I * Stride), ElemTy));
     return AnyValue(std::move(ValVec));
   }
   if (auto *StructTy = dyn_cast<StructType>(Ty)) {
-    auto *Layout = DL.getStructLayout(StructTy);
-    uint32_t BaseOffsetInBits = OffsetInBits;
+    const StructLayout *Layout = DL.getStructLayout(StructTy);
     std::vector<AnyValue> ValVec;
     uint32_t NumElements = StructTy->getNumElements();
     ValVec.reserve(NumElements);
-    for (uint32_t I = 0; I != NumElements; ++I) {
-      Type *ElemTy = StructTy->getElementType(I);
-      TypeSize ElemOffset = Layout->getElementOffset(I);
-      OffsetInBits = BaseOffsetInBits + getEffectiveTypeSize(ElemOffset) * 8;
-      ValVec.push_back(
-          fromBytes(Bytes, ElemTy, OffsetInBits, /*CheckPaddingBits=*/true));
-    }
-    OffsetInBits =
-        BaseOffsetInBits +
-        static_cast<uint32_t>(getEffectiveTypeStoreSize(StructTy)) * 8;
+    for (uint32_t I = 0; I != NumElements; ++I)
+      ValVec.push_back(fromBytes(
+          Bytes.slice(getEffectiveTypeSize(Layout->getElementOffset(I))),
+          StructTy->getElementType(I)));
     return AnyValue(std::move(ValVec));
   }
   llvm_unreachable("Unsupported first class type.");
 }
 
-void Context::toBytes(const AnyValue &Val, Type *Ty, uint32_t &OffsetInBits,
-                      MutableArrayRef<Byte> Bytes, bool PaddingBits) {
-  if (Val.isPoison() || Ty->isIntegerTy() || Ty->isFloatingPointTy() ||
-      Ty->isPointerTy()) {
-    uint32_t NumBits = DL.getTypeSizeInBits(Ty).getFixedValue();
-    uint32_t NewOffsetInBits = OffsetInBits + NumBits;
-    if (PaddingBits)
-      NewOffsetInBits = alignTo(NewOffsetInBits, 8);
-    bool NeedsPadding = NewOffsetInBits != OffsetInBits + NumBits;
-    auto WriteBits = [&](const APInt &Bits) {
-      for (uint32_t I = 0, E = Bits.getBitWidth(); I < E; I += 8) {
-        uint32_t NumBitsInByte = std::min(8U, E - I);
-        uint32_t BitsStart =
-            OffsetInBits + (DL.isLittleEndian() ? I : (E - NumBitsInByte - I));
-        uint32_t BitsEnd = BitsStart + NumBitsInByte - 1;
-        uint8_t BitsVal =
-            static_cast<uint8_t>(Bits.extractBitsAsZExtValue(NumBitsInByte, I));
-
-        Bytes[BitsStart / 8].writeBits(
-            static_cast<uint8_t>(((1U << NumBitsInByte) - 1)
-                                 << (BitsStart % 8)),
-            static_cast<uint8_t>(BitsVal << (BitsStart % 8)));
-        // If it is a cross-byte access, write the remaining bits to the next
-        // byte.
-        if (((BitsStart ^ BitsEnd) & ~7) != 0)
-          Bytes[BitsEnd / 8].writeBits(
-              static_cast<uint8_t>((1U << (BitsEnd % 8 + 1)) - 1),
-              static_cast<uint8_t>(BitsVal >> (8 - (BitsStart % 8))));
-      }
-    };
-    if (Val.isPoison()) {
-      for (uint32_t I = 0, E = NewOffsetInBits - OffsetInBits; I < E;) {
-        uint32_t NumBitsInByte = std::min(8 - (OffsetInBits + I) % 8, E - I);
-        assert(((OffsetInBits ^ (OffsetInBits + NumBitsInByte - 1)) & ~7) ==
-                   0 &&
-               "Across byte boundary.");
-        Bytes[(OffsetInBits + I) / 8].poisonBits(static_cast<uint8_t>(
-            ((1U << NumBitsInByte) - 1) << ((OffsetInBits + I) % 8)));
-        I += NumBitsInByte;
-      }
-    } else if (Ty->isIntegerTy()) {
-      auto &Bits = Val.asInteger();
-      WriteBits(NeedsPadding ? Bits.zext(NewOffsetInBits - OffsetInBits)
-                             : Bits);
-    } else if (Ty->isFloatingPointTy()) {
-      auto Bits = Val.asFloat().bitcastToAPInt();
-      WriteBits(NeedsPadding ? Bits.zext(NewOffsetInBits - OffsetInBits)
-                             : Bits);
-    } else if (Ty->isPointerTy()) {
-      auto &Bits = Val.asPointer().address();
-      WriteBits(NeedsPadding ? Bits.zext(NewOffsetInBits - OffsetInBits)
-                             : Bits);
-      // TODO: save metadata of the pointer.
-    } else {
-      llvm_unreachable("Unsupported scalar type.");
+void Context::toBytes(const AnyValue &Val, Type *Ty, uint32_t OffsetInBits,
+                      MutableBytesView Bytes, bool PaddingBits) {
+  uint32_t NumBits = DL.getTypeSizeInBits(Ty).getFixedValue();
+  uint32_t NewOffsetInBits = OffsetInBits + NumBits;
+  if (PaddingBits)
+    NewOffsetInBits = alignTo(NewOffsetInBits, 8);
+  bool NeedsPadding = NewOffsetInBits != OffsetInBits + NumBits;
+  auto WriteBits = [&](const APInt &Bits) {
+    for (uint32_t I = 0, E = Bits.getBitWidth(); I < E; I += 8) {
+      uint32_t NumBitsInByte = std::min(8U, E - I);
+      uint32_t BitsStart = OffsetInBits + I;
+      uint32_t BitsEnd = BitsStart + NumBitsInByte - 1;
+      uint8_t BitsVal =
+          static_cast<uint8_t>(Bits.extractBitsAsZExtValue(NumBitsInByte, I));
+
+      Bytes[BitsStart / 8].writeBits(
+          static_cast<uint8_t>(((1U << NumBitsInByte) - 1) << (BitsStart % 8)),
+          static_cast<uint8_t>(BitsVal << (BitsStart % 8)));
+      // If it is a cross-byte access, write the remaining bits to the next
+      // byte.
+      if (((BitsStart ^ BitsEnd) & ~7) != 0)
+        Bytes[BitsEnd / 8].writeBits(
+            static_cast<uint8_t>((1U << (BitsEnd % 8 + 1)) - 1),
+            static_cast<uint8_t>(BitsVal >> (8 - (BitsStart % 8))));
+    }
+  };
+  if (Val.isPoison()) {
+    for (uint32_t I = 0, E = NewOffsetInBits - OffsetInBits; I < E;) {
+      uint32_t NumBitsInByte = std::min(8 - (OffsetInBits + I) % 8, E - I);
+      assert(((OffsetInBits ^ (OffsetInBits + NumBitsInByte - 1)) & ~7) == 0 &&
+             "Across byte boundary.");
+      Bytes[(OffsetInBits + I) / 8].poisonBits(static_cast<uint8_t>(
+          ((1U << NumBitsInByte) - 1) << ((OffsetInBits + I) % 8)));
+      I += NumBitsInByte;
     }
-    OffsetInBits = NewOffsetInBits;
+  } else if (Ty->isIntegerTy()) {
+    auto &Bits = Val.asInteger();
+    WriteBits(NeedsPadding ? Bits.zext(NewOffsetInBits - OffsetInBits) : Bits);
+  } else if (Ty->isFloatingPointTy()) {
+    auto Bits = Val.asFloat().bitcastToAPInt();
+    WriteBits(NeedsPadding ? Bits.zext(NewOffsetInBits - OffsetInBits) : Bits);
+  } else if (Ty->isPointerTy()) {
+    auto &Bits = Val.asPointer().address();
+    WriteBits(NeedsPadding ? Bits.zext(NewOffsetInBits - OffsetInBits) : Bits);
+    // TODO: save metadata of the pointer.
+  } else {
+    llvm_unreachable("Unsupported scalar type.");
+  }
+}
+
+void Context::toBytes(const AnyValue &Val, Type *Ty,
+                      MutableArrayRef<Byte> Bytes) {
+  if (Ty->isIntegerTy() || Ty->isFloatingPointTy() || Ty->isPointerTy()) {
+    toBytes(Val, Ty, /*OffsetInBits=*/0, MutableBytesView(Bytes, DL),
+            /*PaddingBits=*/true);
     return;
   }
 
-  assert(OffsetInBits % 8 == 0 && "Missing padding bits.");
   if (auto *VecTy = dyn_cast<VectorType>(Ty)) {
     Type *ElemTy = VecTy->getElementType();
-    auto &ValVec = Val.asAggregate();
-    uint32_t NewOffsetInBits =
-        alignTo(OffsetInBits + DL.getTypeSizeInBits(ElemTy).getFixedValue() *
-                                   ValVec.size(),
-                8);
+    uint32_t ElemBits = DL.getTypeSizeInBits(ElemTy).getFixedValue();
+    uint32_t NumElements = getEVL(VecTy->getElementCount());
+    // Zero padding bits. <N x iM> acts as if an integer type with N * M bits.
+    uint32_t VecBits = ElemBits * NumElements;
+    uint32_t AlignedVecBits = alignTo(VecBits, 8);
+    MutableBytesView View(Bytes, DL);
+    if (VecBits != AlignedVecBits) {
+      // The padding bits are located in the last byte on little-endian systems.
+      // On big-endian systems, the padding bits are located in the first byte.
+      Byte &PaddingByte = View[Bytes.size() - 1];
+      uint32_t Mask = (~0U << (VecBits % 8)) & 255U;
+      PaddingByte.zeroBits(Mask);
+    }
+    // For little endian element zero is put in the least significant bits of
+    // the integer, and for big endian element zero is put in the most
+    // significant bits.
     if (DL.isLittleEndian()) {
-      for (const auto &SubVal : ValVec)
-        toBytes(SubVal, ElemTy, OffsetInBits, Bytes,
-                /*PaddingBits=*/false);
+      for (const auto &[I, Val] : enumerate(Val.asAggregate()))
+        toBytes(Val, ElemTy, ElemBits * I, View, /*PaddingBits=*/false);
     } else {
-      for (const auto &SubVal : reverse(ValVec))
-        toBytes(SubVal, ElemTy, OffsetInBits, Bytes,
-                /*PaddingBits=*/false);
-    }
-    if (NewOffsetInBits != OffsetInBits) {
-      assert(OffsetInBits % 8 != 0 && NewOffsetInBits - OffsetInBits < 8 &&
-             "Unexpected offset.");
-      // Fill remaining bits with zero.
-      Bytes[OffsetInBits / 8].zeroBits(
-          static_cast<uint8_t>(~0U << (OffsetInBits % 8)));
+      for (const auto &[I, Val] : enumerate(reverse(Val.asAggregate())))
+        toBytes(Val, ElemTy, ElemBits * I, View, /*PaddingBits=*/false);
     }
-    OffsetInBits = NewOffsetInBits;
     return;
   }
-  auto FillUndefBytes = [&](uint32_t NewOffsetInBits) {
-    if (OffsetInBits == NewOffsetInBits)
-      return;
-    // Fill padding bits due to alignment requirement.
-    assert(NewOffsetInBits > OffsetInBits &&
-           "Unexpected negative padding bits!");
-    fill(Bytes.slice(OffsetInBits / 8, (NewOffsetInBits - OffsetInBits) / 8),
-         Byte::undef());
-    OffsetInBits = NewOffsetInBits;
+
+  // Fill padding bytes due to alignment requirement.
+  auto FillUndefBytes = [&](uint64_t Begin, uint64_t End) {
+    fill(Bytes.slice(Begin, End - Begin), Byte::undef());
   };
   if (auto *ArrTy = dyn_cast<ArrayType>(Ty)) {
     Type *ElemTy = ArrTy->getElementType();
-    uint32_t CurrentOffsetInBits = OffsetInBits;
-    uint32_t StrideInBits = getEffectiveTypeAllocSize(ElemTy) * 8;
+    uint64_t Offset = 0;
+    uint64_t StoreSize = getEffectiveTypeStoreSize(ElemTy);
+    uint64_t Stride = getEffectiveTypeAllocSize(ElemTy);
     for (const auto &SubVal : Val.asAggregate()) {
-      FillUndefBytes(CurrentOffsetInBits);
-      toBytes(SubVal, ElemTy, OffsetInBits, Bytes, /*PaddingBits=*/true);
-      CurrentOffsetInBits += StrideInBits;
+      toBytes(SubVal, ElemTy, Bytes.slice(Offset));
+      FillUndefBytes(Offset + StoreSize, Offset + Stride);
+      Offset += Stride;
     }
     return;
   }
   if (auto *StructTy = dyn_cast<StructType>(Ty)) {
-    auto *Layout = DL.getStructLayout(StructTy);
-    uint32_t BaseOffsetInBits = OffsetInBits;
+    const StructLayout *Layout = DL.getStructLayout(StructTy);
+    uint64_t LastAccessedOffset = 0;
     for (uint32_t I = 0, E = Val.asAggregate().size(); I != E; ++I) {
       Type *ElemTy = StructTy->getElementType(I);
-      TypeSize ElemOffset = Layout->getElementOffset(I);
-      uint32_t NewOffsetInBits =
-          BaseOffsetInBits + getEffectiveTypeSize(ElemOffset) * 8;
-      FillUndefBytes(NewOffsetInBits);
-      toBytes(Val.asAggregate()[I], ElemTy, OffsetInBits, Bytes,
-              /*PaddingBits=*/true);
+      uint64_t ElemOffset = getEffectiveTypeSize(Layout->getElementOffset(I));
+      FillUndefBytes(LastAccessedOffset, ElemOffset);
+      toBytes(Val.asAggregate()[I], ElemTy, Bytes.slice(ElemOffset));
+      LastAccessedOffset = ElemOffset + getEffectiveTypeStoreSize(ElemTy);
     }
-    uint32_t NewOffsetInBits =
-        BaseOffsetInBits + getEffectiveTypeStoreSize(StructTy) * 8;
-    FillUndefBytes(NewOffsetInBits);
+    FillUndefBytes(LastAccessedOffset, getEffectiveTypeStoreSize(StructTy));
     return;
   }
 
   llvm_unreachable("Unsupported first class type.");
 }
 
-AnyValue Context::fromBytes(ArrayRef<Byte> Bytes, Type *Ty) {
-  uint32_t OffsetInBits = 0;
-  return fromBytes(Bytes, Ty, OffsetInBits, /*CheckPaddingBits=*/true);
-}
-
-void Context::toBytes(const AnyValue &Val, Type *Ty,
-                      MutableArrayRef<Byte> Bytes) {
-  uint32_t OffsetInBits = 0;
-  toBytes(Val, Ty, OffsetInBits, Bytes, /*PaddingBits=*/true);
-}
-
 AnyValue Context::load(MemoryObject &MO, uint64_t Offset, Type *ValTy) {
   return fromBytes(
       MO.getBytes().slice(Offset, getEffectiveTypeStoreSize(ValTy)), ValTy);
diff --git a/llvm/tools/llubi/lib/Context.h b/llvm/tools/llubi/lib/Context.h
index a250004b3cb54..1e08fe13ed71d 100644
--- a/llvm/tools/llubi/lib/Context.h
+++ b/llvm/tools/llubi/lib/Context.h
@@ -116,6 +116,23 @@ class EventHandler {
   }
 };
 
+/// Endianness aware accessor for bytes.
+template <typename ArrayRefT> class BytesView {
+  ArrayRefT Bytes;
+  bool IsLittleEndian;
+
+public:
+  explicit BytesView(ArrayRefT Ref, const DataLayout &DL)
+      : Bytes(Ref), IsLittleEndian(DL.isLittleEndian()) {}
+
+  decltype(auto) operator[](uint32_t Index) {
+    return Bytes[IsLittleEndian ? Index : Bytes.size() - 1 - Index];
+  }
+};
+
+using ConstBytesView = BytesView<ArrayRef<Byte>>;
+using MutableBytesView = BytesView<MutableArrayRef<Byte>>;
+
 /// The global context for the interpreter.
 /// It tracks global state such as heap memory objects and floating point
 /// environment.
@@ -149,10 +166,10 @@ class Context {
   // precisely after we make ptrtoint have the implicit side-effect of exposing
   // the provenance.
   std::map<uint64_t, IntrusiveRefCntPtr<MemoryObject>> MemoryObjects;
-  AnyValue fromBytes(ArrayRef<Byte> Bytes, Type *Ty, uint32_t &OffsetInBits,
+  AnyValue fromBytes(ConstBytesView Bytes, Type *Ty, uint32_t OffsetInBits,
                      bool CheckPaddingBits);
-  void toBytes(const AnyValue &Val, Type *Ty, uint32_t &OffsetInBits,
-               MutableArrayRef<Byte> Bytes, bool PaddingBits);
+  void toBytes(const AnyValue &Val, Type *Ty, uint32_t OffsetInBits,
+               MutableBytesView Bytes, bool PaddingBits);
 
   // Constants
   // Use std::map to avoid iterator/reference invalidation.



More information about the llvm-commits mailing list