[llvm] [BPF] Add CLI option to enable misaligned memory access (PR #167013)

Claire Fan via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 10 23:58:29 PST 2025


https://github.com/clairechingching updated https://github.com/llvm/llvm-project/pull/167013

From 6bbc7016223bd1776519fdba148909eac16070fd Mon Sep 17 00:00:00 2001
From: Claire xyz <fanyungching at gmail.com>
Date: Fri, 7 Nov 2025 11:08:47 -0500
Subject: [PATCH] [BPF] Add CLI option to enable misaligned memory access

---
 llvm/lib/Target/BPF/BPFISelLowering.cpp       |  24 +++
 llvm/lib/Target/BPF/BPFISelLowering.h         |   4 +
 llvm/test/CodeGen/BPF/unaligned_load_store.ll | 196 ++++++++++++++++++
 3 files changed, 224 insertions(+)
 create mode 100644 llvm/test/CodeGen/BPF/unaligned_load_store.ll

diff --git a/llvm/lib/Target/BPF/BPFISelLowering.cpp b/llvm/lib/Target/BPF/BPFISelLowering.cpp
index 3c61216cd9327..cc7d89bcaa333 100644
--- a/llvm/lib/Target/BPF/BPFISelLowering.cpp
+++ b/llvm/lib/Target/BPF/BPFISelLowering.cpp
@@ -43,6 +43,10 @@ static cl::opt<unsigned> BPFMinimumJumpTableEntries(
     "bpf-min-jump-table-entries", cl::init(13), cl::Hidden,
     cl::desc("Set minimum number of entries to use a jump table on BPF"));
 
+static cl::opt<bool> BPFAllowMisalignedMemAccess(
+    "bpf-allow-misaligned-mem-access", cl::Hidden, cl::init(false),
+    cl::desc("Allow misaligned memory access"));
+
 static void fail(const SDLoc &DL, SelectionDAG &DAG, const Twine &Msg,
                  SDValue Val = {}) {
   std::string Str;
@@ -208,6 +212,26 @@ BPFTargetLowering::BPFTargetLowering(const TargetMachine &TM,
   HasMovsx = STI.hasMovsx();
 }
 
+bool BPFTargetLowering::allowsMisalignedMemoryAccesses(
+    EVT VT, unsigned, Align, MachineMemOperand::Flags, unsigned *Fast) const {
+  if (!BPFAllowMisalignedMemAccess) {
+    // -bpf-allow-misaligned-mem-access was not passed; keep default behavior.
+    return false;
+  }
+
+  if (!VT.isSimple()) {
+    // Only allow misaligned accesses for simple value types.
+    return false;
+  }
+
+  if (Fast) {
+    // Always report the access as fast when the option is enabled.
+    *Fast = true;
+  }
+
+  return true;
+}
+
 bool BPFTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
   return false;
 }
diff --git a/llvm/lib/Target/BPF/BPFISelLowering.h b/llvm/lib/Target/BPF/BPFISelLowering.h
index 3d6e7c70df28b..0e6ad216af4ca 100644
--- a/llvm/lib/Target/BPF/BPFISelLowering.h
+++ b/llvm/lib/Target/BPF/BPFISelLowering.h
@@ -32,6 +32,10 @@ class BPFTargetLowering : public TargetLowering {
   // with the given GlobalAddress is legal.
   bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
 
+  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned, Align,
+                                      MachineMemOperand::Flags,
+                                      unsigned *) const override;
+
   BPFTargetLowering::ConstraintType
   getConstraintType(StringRef Constraint) const override;
 
diff --git a/llvm/test/CodeGen/BPF/unaligned_load_store.ll b/llvm/test/CodeGen/BPF/unaligned_load_store.ll
new file mode 100644
index 0000000000000..9ddbd7e64f7bf
--- /dev/null
+++ b/llvm/test/CodeGen/BPF/unaligned_load_store.ll
@@ -0,0 +1,196 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+
+; RUN: llc -mtriple=bpfel -bpf-allow-misaligned-mem-access -verify-machineinstrs %s -o - \
+; RUN:    | FileCheck --check-prefixes=ALL,MISALIGN %s
+; RUN: llc -mtriple=bpfeb -bpf-allow-misaligned-mem-access -verify-machineinstrs %s -o - \
+; RUN:    | FileCheck --check-prefixes=ALL,MISALIGN %s
+
+; RUN: llc -mtriple=bpfel -verify-machineinstrs %s -o - \
+; RUN:    | FileCheck --check-prefixes=ALL,ALIGN %s
+; RUN: llc -mtriple=bpfeb -verify-machineinstrs %s -o - \
+; RUN:    | FileCheck --check-prefixes=ALL,ALIGN %s
+; NOTE:
+;   This test verifies that the new -bpf-allow-misaligned-mem-access
+;   option allows the BPF backend to emit direct unaligned load/store
+;   instructions instead of byte-by-byte emulation sequences.
+
+; ---------------------------------------------------------------------
+; i8 load
+; ---------------------------------------------------------------------
+define i8 @test_load_i8(i8* %p) {
+; ALL-LABEL: test_load_i8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    w{{[0-9]+}} = *(u8 *)(r1 + 0)
+; ALL-NEXT:    exit
+  %v = load i8, i8* %p, align 1
+  ret i8 %v
+}
+
+; ---------------------------------------------------------------------
+; i8 store
+; ---------------------------------------------------------------------
+define void @test_store_i8(i8* %p, i8 %v) {
+; ALL-LABEL: test_store_i8:
+; ALL:       # %bb.0:
+; ALL-NEXT:    *(u8 *)(r1 + 0) = w{{[0-9]+}}
+; ALL-NEXT:    exit
+  store i8 %v, i8* %p, align 1
+  ret void
+}
+
+; ---------------------------------------------------------------------
+; i16 load
+; ---------------------------------------------------------------------
+define i16 @test_load_i16(i16* %p) {
+; MISALIGN-LABEL: test_load_i16:
+; MISALIGN:       # %bb.0:
+; MISALIGN:    w{{[0-9]+}} = *(u16 *)(r1 + 0)
+; MISALIGN:    exit
+;
+; ALIGN-LABEL: test_load_i16:
+; ALIGN:       # %bb.0:
+; ALIGN-DAG:    w{{[0-9]+}} = *(u8 *)(r1 + 0)
+; ALIGN-DAG:    w{{[0-9]+}} = *(u8 *)(r1 + 1)
+; ALIGN-DAG:    w{{[0-9]+}} <<= 8
+; ALIGN-DAG:    w{{[0-9]+}} |= w{{[0-9]+}}
+; ALIGN:        exit
+  %v = load i16, i16* %p, align 1
+  ret i16 %v
+}
+
+; ---------------------------------------------------------------------
+; i16 store
+; ---------------------------------------------------------------------
+define void @test_store_i16(i16* %p, i16 %v) {
+; MISALIGN-LABEL: test_store_i16:
+; MISALIGN:       # %bb.0:
+; MISALIGN:    *(u16 *)(r1 + 0) = w{{[0-9]+}}
+; MISALIGN:    exit
+;
+; ALIGN-LABEL: test_store_i16:
+; ALIGN:       # %bb.0:
+; ALIGN-DAG:    *(u8 *)(r1 + 0) = w{{[0-9]+}}
+; ALIGN-DAG:    w{{[0-9]+}} >>= 8
+; ALIGN-DAG:    *(u8 *)(r1 + 1) = w{{[0-9]+}}
+; ALIGN:        exit
+  store i16 %v, i16* %p, align 1
+  ret void
+}
+
+; ---------------------------------------------------------------------
+; i32 load
+; ---------------------------------------------------------------------
+
+define i32 @test_load_i32(i32* %p) {
+; MISALIGN-LABEL: test_load_i32:
+; MISALIGN:       # %bb.0:
+; MISALIGN:    w{{[0-9]+}} = *(u32 *)(r1 + 0)
+; MISALIGN:    exit
+;
+; ALIGN-LABEL: test_load_i32:
+; ALIGN:       # %bb.0:
+; ALIGN-DAG:    w{{[0-9]+}} = *(u8 *)(r1 + 0)
+; ALIGN-DAG:    w{{[0-9]+}} <<= 8
+; ALIGN-DAG:    w{{[0-9]+}} = *(u8 *)(r1 + 1)
+; ALIGN-DAG:    w{{[0-9]+}} |= w{{[0-9]+}}
+; ALIGN-DAG:    w{{[0-9]+}} = *(u8 *)(r1 + 2)
+; ALIGN-DAG:    w{{[0-9]+}} <<= 16
+; ALIGN-DAG:    w{{[0-9]+}} = *(u8 *)(r1 + 3)
+; ALIGN-DAG:    w{{[0-9]+}} <<= 24
+; ALIGN:        exit
+  %v = load i32, i32* %p, align 1
+  ret i32 %v
+}
+
+; ---------------------------------------------------------------------
+; i32 store
+; ---------------------------------------------------------------------
+
+define void @test_store_i32(i32* %p, i32 %v) {
+; MISALIGN-LABEL: test_store_i32:
+; MISALIGN:       # %bb.0:
+; MISALIGN:    *(u32 *)(r1 + 0) = w{{[0-9]+}}
+; MISALIGN:    exit
+;
+; ALIGN-LABEL: test_store_i32:
+; ALIGN:       # %bb.0:
+; ALIGN-DAG:    w{{[0-9]+}} = w{{[0-9]+}}
+; ALIGN-DAG:    w{{[0-9]+}} >>= 24
+; ALIGN-DAG:    *(u8 *)(r1 + 0) = w{{[0-9]+}}
+; ALIGN-DAG:    w{{[0-9]+}} = w{{[0-9]+}}
+; ALIGN-DAG:    w{{[0-9]+}} >>= 16
+; ALIGN-DAG:    *(u8 *)(r1 + 1) = w{{[0-9]+}}
+; ALIGN-DAG:    *(u8 *)(r1 + 2) = w{{[0-9]+}}
+; ALIGN-DAG:    w{{[0-9]+}} >>= 8
+; ALIGN-DAG:    *(u8 *)(r1 + 3) = w{{[0-9]+}}
+; ALIGN:        exit
+  store i32 %v, i32* %p, align 1
+  ret void
+}
+
+; ---------------------------------------------------------------------
+; i64 load
+; ---------------------------------------------------------------------
+
+define i64 @test_load_i64(i64* %p) {
+; MISALIGN-LABEL: test_load_i64:
+; MISALIGN:       # %bb.0:
+; MISALIGN:    r0 = *(u64 *)(r1 + 0)
+; MISALIGN:    exit
+;
+; ALIGN-LABEL: test_load_i64:
+; ALIGN:       # %bb.0:
+; ALIGN-DAG:    w{{[0-9]+}} = *(u8 *)(r1 + 0)
+; ALIGN-DAG:    w{{[0-9]+}} = *(u8 *)(r1 + 1)
+; ALIGN-DAG:    r{{[0-9]+}} <<= 8
+; ALIGN-DAG:    r{{[0-9]+}} |= r{{[0-9]+}}
+; ALIGN-DAG:    w{{[0-9]+}} = *(u8 *)(r1 + 2)
+; ALIGN-DAG:    r{{[0-9]+}} <<= 16
+; ALIGN-DAG:    w{{[0-9]+}} = *(u8 *)(r1 + 3)
+; ALIGN-DAG:    r{{[0-9]+}} <<= 24
+; ALIGN-DAG:    w{{[0-9]+}} = *(u8 *)(r1 + 4)
+; ALIGN-DAG:    w{{[0-9]+}} <<= 8
+; ALIGN-DAG:    w{{[0-9]+}} = *(u8 *)(r1 + 5)
+; ALIGN-DAG:    w{{[0-9]+}} |= w{{[0-9]+}}
+; ALIGN-DAG:    w{{[0-9]+}} = *(u8 *)(r1 + 6)
+; ALIGN-DAG:    w{{[0-9]+}} <<= 16
+; ALIGN-DAG:    w{{[0-9]+}} = *(u8 *)(r1 + 7)
+; ALIGN-DAG:    w{{[0-9]+}} <<= 24
+; ALIGN-DAG:    r{{[0-9]+}} <<= 32
+; ALIGN:        exit
+  %v = load i64, i64* %p, align 1
+  ret i64 %v
+}
+
+; ---------------------------------------------------------------------
+; i64 store
+; ---------------------------------------------------------------------
+
+define void @test_store_i64(i64* %p, i64 %v) {
+; MISALIGN-LABEL: test_store_i64:
+; MISALIGN:       # %bb.0:
+; MISALIGN:    *(u64 *)(r1 + 0) = r2
+; MISALIGN:    exit
+;
+; ALIGN-LABEL: test_store_i64:
+; ALIGN:       # %bb.0:
+; ALIGN-DAG:    *(u8 *)(r1 + 0) = w{{[0-9]+}}
+; ALIGN-DAG:    r{{[0-9]+}} = r{{[0-9]+}}
+; ALIGN-DAG:    r{{[0-9]+}} >>= 56
+; ALIGN-DAG:    *(u8 *)(r1 + 1) = w{{[0-9]+}}
+; ALIGN-DAG:    r{{[0-9]+}} >>= 48
+; ALIGN-DAG:    *(u8 *)(r1 + 2) = w{{[0-9]+}}
+; ALIGN-DAG:    r{{[0-9]+}} >>= 40
+; ALIGN-DAG:    *(u8 *)(r1 + 3) = w{{[0-9]+}}
+; ALIGN-DAG:    r{{[0-9]+}} >>= 32
+; ALIGN-DAG:    *(u8 *)(r1 + 4) = w{{[0-9]+}}
+; ALIGN-DAG:    r{{[0-9]+}} >>= 24
+; ALIGN-DAG:    *(u8 *)(r1 + 5) = w{{[0-9]+}}
+; ALIGN-DAG:    r{{[0-9]+}} >>= 16
+; ALIGN-DAG:    *(u8 *)(r1 + 6) = w{{[0-9]+}}
+; ALIGN-DAG:    r{{[0-9]+}} >>= 8
+; ALIGN-DAG:    *(u8 *)(r1 + 7) = w{{[0-9]+}}
+; ALIGN:        exit
+  store i64 %v, i64* %p, align 1
+  ret void
+}
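
A quick way to exercise the option outside of lit is to feed the backend a
misaligned access from C. The following is only an illustrative sketch (the
struct, field names, and file name are made up and not part of the patch);
backend cl::opt flags such as this one can be forwarded from clang via -mllvm:

    /* misaligned.c -- hypothetical example, not part of the patch */
    struct __attribute__((packed)) hdr {
      unsigned char tag;
      unsigned int  val;   /* offset 1, so the 4-byte load below has align 1 */
    };

    unsigned int read_val(struct hdr *h) {
      /* With the option set, this should lower to a single u32 load;
         otherwise the backend expands it into byte loads. */
      return h->val;
    }

    /* clang --target=bpf -O2 -mllvm -bpf-allow-misaligned-mem-access \
         -S -o - misaligned.c */

Without the flag the access is expanded byte by byte, matching the ALIGN
check lines in the test above.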


