[llvm] [AArch64] Fix vectorToScalarBitmask BE (#156312) (PR #156314)

Giuseppe Cesarano via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 1 04:15:15 PDT 2025


https://github.com/GiuseppeCesarano created https://github.com/llvm/llvm-project/pull/156314

Closes #156312 
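
For context: vectorToScalarBitmask builds one power-of-two mask constant per vector lane, masks the sign-extended comparison result with them, and reduces the lanes into a scalar bitmask. The constants were generated in little-endian bit order unconditionally, so on big-endian targets, where the lane order is reversed, the bits of the result came out flipped. A minimal standalone sketch of the corrected constant generation (plain C++ outside SelectionDAG; buildMaskConstants is a hypothetical helper mirroring the patched loop, not an LLVM API):

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical helper mirroring the patched loop: lane I contributes
// bit I on little-endian targets; on big-endian targets the lane
// order is reversed, so lane I contributes bit (NumEl - 1 - I).
static std::vector<uint64_t> buildMaskConstants(unsigned NumEl, bool IsLE) {
  std::vector<uint64_t> Masks;
  for (unsigned I = 0; I < NumEl; ++I)
    Masks.push_back(IsLE ? (1ull << I) : (1ull << (NumEl - 1 - I)));
  return Masks;
}

int main() {
  // For v4i1: LE yields 1 2 4 8, BE yields 8 4 2 1, matching the
  // .word constant pools checked by the new test below.
  for (bool IsLE : {true, false}) {
    std::printf("%s:", IsLE ? "LE" : "BE");
    for (uint64_t M : buildMaskConstants(4, IsLE))
      std::printf(" %llu", (unsigned long long)M);
    std::printf("\n");
  }
  return 0;
}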

From d294d467a75d99f6f4eccd462f9aa303c1f4c5e9 Mon Sep 17 00:00:00 2001
From: Giuseppe Cesarano <PecoraInPannaCotta at gmail.com>
Date: Mon, 1 Sep 2025 13:04:57 +0200
Subject: [PATCH] [AArch64] Fix vectorToScalarBitmask BE (#156312)

---
 .../Target/AArch64/AArch64ISelLowering.cpp    | 11 ++-
 .../AArch64/vector-to-scalar-bitmask.ll       | 89 +++++++++++++++++++
 2 files changed, 97 insertions(+), 3 deletions(-)
 create mode 100644 llvm/test/CodeGen/AArch64/vector-to-scalar-bitmask.ll

diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index b7011e0ea1669..ea83e9d12069b 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -24168,6 +24168,7 @@ static SDValue vectorToScalarBitmask(SDNode *N, SelectionDAG &DAG) {
   // Ensure that all elements' bits are either 0s or 1s.
   ComparisonResult = DAG.getSExtOrTrunc(ComparisonResult, DL, VecVT);
 
+  bool IsLE = DAG.getDataLayout().isLittleEndian();
   SmallVector<SDValue, 16> MaskConstants;
   if (DAG.getSubtarget<AArch64Subtarget>().isNeonAvailable() &&
       VecVT == MVT::v16i8) {
@@ -24175,7 +24176,10 @@ static SDValue vectorToScalarBitmask(SDNode *N, SelectionDAG &DAG) {
     // per entry. We split it into two halves, apply the mask, zip the halves to
     // create 8x 16-bit values, and then perform the vector reduce.
     for (unsigned Half = 0; Half < 2; ++Half) {
-      for (unsigned MaskBit = 1; MaskBit <= 128; MaskBit *= 2) {
+      for (unsigned I = 0; I < 8; ++I) {
+        // On big-endian targets, the lane order in sub-byte vector elements
+        // gets reversed, so we need to flip the bit index.
+        unsigned MaskBit = IsLE ? (1u << I) : (1u << (7 - I));
         MaskConstants.push_back(DAG.getConstant(MaskBit, DL, MVT::i32));
       }
     }
@@ -24193,8 +24197,9 @@ static SDValue vectorToScalarBitmask(SDNode *N, SelectionDAG &DAG) {
   }
 
   // All other vector sizes.
-  unsigned MaxBitMask = 1u << (VecVT.getVectorNumElements() - 1);
-  for (unsigned MaskBit = 1; MaskBit <= MaxBitMask; MaskBit *= 2) {
+  unsigned NumEl = VecVT.getVectorNumElements();
+  for (unsigned I = 0; I < NumEl; ++I) {
+    unsigned MaskBit = IsLE ? (1u << I) : (1u << (NumEl - 1 - I));
     MaskConstants.push_back(DAG.getConstant(MaskBit, DL, MVT::i64));
   }
 
diff --git a/llvm/test/CodeGen/AArch64/vector-to-scalar-bitmask.ll b/llvm/test/CodeGen/AArch64/vector-to-scalar-bitmask.ll
new file mode 100644
index 0000000000000..59c8b7389db54
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/vector-to-scalar-bitmask.ll
@@ -0,0 +1,89 @@
+; RUN: llc -O0 -mtriple=aarch64-linux-gnu %s -o - | FileCheck %s --check-prefix=CHECK-LE
+; RUN: llc -O0 -mtriple=aarch64_be-linux-gnu %s -o - | FileCheck %s --check-prefix=CHECK-BE
+
+ at haystack4 = internal unnamed_addr constant [4 x i32] [i32 0, i32 1, i32 2, i32 3], align 4
+ at haystack16 = internal unnamed_addr constant [16 x i8] [i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15], align 16
+
+
+define i8 @test4() {
+  %matches = alloca <4 x i1>, align 1
+  %index_ptr = alloca i64, align 8
+  store i64 0, ptr %index_ptr, align 8
+  %index_val = load i64, ptr %index_ptr, align 8
+  %haystack = getelementptr inbounds i32, ptr getelementptr inbounds (i8, ptr @haystack4, i64 0), i64 %index_val
+  %h_vec = load <4 x i32>, ptr %haystack, align 4
+  %cmp_vec = icmp eq <4 x i32> %h_vec, <i32 2, i32 2, i32 2, i32 2>
+  store <4 x i1> %cmp_vec, ptr %matches, align 1
+  %cmp_load = load <4 x i1>, ptr %matches, align 1
+  %extr = extractelement <4 x i1> %cmp_load, i64 2
+  %ret = zext i1 %extr to i8
+  ret i8 %ret
+}
+
+define i8 @test16() {
+  %matches = alloca <16 x i1>, align 2
+  %index_ptr = alloca i64, align 8
+  store i64 0, ptr %index_ptr, align 8
+  %index_val = load i64, ptr %index_ptr, align 8
+  %haystack = getelementptr inbounds i8, ptr getelementptr inbounds (i8, ptr @haystack16, i64 0), i64 %index_val
+  %h_vec = load <16 x i8>, ptr %haystack, align 16
+  %cmp_vec = icmp eq <16 x i8> %h_vec, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
+  store <16 x i1> %cmp_vec, ptr %matches, align 2
+  %cmp_load = load <16 x i1>, ptr %matches, align 2
+  %extr = extractelement <16 x i1> %cmp_load, i64 7
+  %ret = zext i1 %extr to i8
+  ret i8 %ret
+}
+
+; Little endian
+
+; CHECK-LE-LABEL: .LCPI0_0:
+; CHECK-LE-NEXT: .word 1
+; CHECK-LE-NEXT: .word 2
+; CHECK-LE-NEXT: .word 4
+; CHECK-LE-NEXT: .word 8
+
+; CHECK-LE-LABEL: .LCPI1_0:
+; CHECK-LE-NEXT: .byte 1
+; CHECK-LE-NEXT: .byte 2
+; CHECK-LE-NEXT: .byte 4
+; CHECK-LE-NEXT: .byte 8
+; CHECK-LE-NEXT: .byte 16
+; CHECK-LE-NEXT: .byte 32
+; CHECK-LE-NEXT: .byte 64
+; CHECK-LE-NEXT: .byte 128
+; CHECK-LE-NEXT: .byte 1
+; CHECK-LE-NEXT: .byte 2
+; CHECK-LE-NEXT: .byte 4
+; CHECK-LE-NEXT: .byte 8
+; CHECK-LE-NEXT: .byte 16
+; CHECK-LE-NEXT: .byte 32
+; CHECK-LE-NEXT: .byte 64
+; CHECK-LE-NEXT: .byte 128
+
+
+; Big endian
+
+; CHECK-BE-LABEL: .LCPI0_0:
+; CHECK-BE-NEXT: .word 8
+; CHECK-BE-NEXT: .word 4
+; CHECK-BE-NEXT: .word 2
+; CHECK-BE-NEXT: .word 1
+
+; CHECK-BE-LABEL: .LCPI1_0:
+; CHECK-BE-NEXT: .byte 128
+; CHECK-BE-NEXT: .byte 64
+; CHECK-BE-NEXT: .byte 32
+; CHECK-BE-NEXT: .byte 16
+; CHECK-BE-NEXT: .byte 8
+; CHECK-BE-NEXT: .byte 4
+; CHECK-BE-NEXT: .byte 2
+; CHECK-BE-NEXT: .byte 1
+; CHECK-BE-NEXT: .byte 128
+; CHECK-BE-NEXT: .byte 64
+; CHECK-BE-NEXT: .byte 32
+; CHECK-BE-NEXT: .byte 16
+; CHECK-BE-NEXT: .byte 8
+; CHECK-BE-NEXT: .byte 4
+; CHECK-BE-NEXT: .byte 2
+; CHECK-BE-NEXT: .byte 1
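
As background for the v16i8 path touched above: the comment in the patch describes splitting the 16 comparison bytes into two halves, giving each lane a single-bit mask, zipping the halves into 8x 16-bit values, and performing the vector reduce. A rough scalar model of that sequence for the little-endian case only (a sketch, not the lowering itself; the real code builds NEON zip and vector-reduce nodes in SelectionDAG, and the big-endian case additionally flips the per-byte bit index as in the patch; bitmaskV16i8LE is a made-up name):

#include <cstdint>
#include <cstdio>

// Scalar model of the v16i8 path: split the 16 comparison bytes
// (0x00 or 0xFF each) into two halves, mask lane I of each half with
// (1 << I), zip the halves into 16-bit values, and OR-reduce. Bit I
// of the result then corresponds to lane I.
static uint16_t bitmaskV16i8LE(const uint8_t Cmp[16]) {
  uint16_t Result = 0;
  for (unsigned I = 0; I < 8; ++I) {
    uint8_t Lo = Cmp[I] & (uint8_t)(1u << I);     // first half, bit I
    uint8_t Hi = Cmp[I + 8] & (uint8_t)(1u << I); // second half, bit I
    Result |= (uint16_t)(Lo | (Hi << 8));         // zipped 16-bit lane
  }
  return Result;
}

int main() {
  uint8_t Cmp[16] = {0};
  Cmp[2] = 0xFF; // lane 2 matched
  Cmp[9] = 0xFF; // lane 9 matched
  std::printf("0x%04x\n", bitmaskV16i8LE(Cmp)); // prints 0x0204
  return 0;
}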


