[llvm] [AArch64] Materialize immediates with 64-bit ORR + EOR if shorter (PR #68287)

Dougall Johnson via llvm-commits llvm-commits at lists.llvm.org
Thu Oct 5 01:04:40 PDT 2023


https://github.com/dougallj created https://github.com/llvm/llvm-project/pull/68287

A number of useful constants can be encoded with a 64-bit ORR followed by a 64-bit EOR, including all remaining repeated byte patterns, some useful repeated 16-bit patterns, and some irregular masks. This patch prioritizes that encoding over three or four instruction encodings. Encoding with MOV + MOVK or ORR + MOVK is still preferred for fast literal generation and readability respectively.

The method devises three candidate values, and checks if both Candidate and (Imm ^ Candidate) are valid logical immediates. If so, Imm is materialized with:

```
ORR Xd, XZR, #(Imm ^ Candidate)
EOR Xd, Xd, #(Candidate)
```

The method has been exhaustively tested to ensure it can solve all possible values (excluding 0, ~0, and plain logical immediates, which are handled earlier).

>From 4c15b0c0846347a540d97a18a7d6e4e632975d48 Mon Sep 17 00:00:00 2001
From: Dougall Johnson <dougallj at gmail.com>
Date: Thu, 5 Oct 2023 18:53:40 +1100
Subject: [PATCH] [AArch64] Materialize immediates with 64-bit ORR + EOR if
 shorter

A number of useful constants can be encoded with a 64-bit ORR followed
by a 64-bit EOR, including all remaining repeated byte patterns, some
useful repeated 16-bit patterns, and some irregular masks. This patch
prioritizes that encoding over three or four instruction encodings.
Encoding with MOV + MOVK or ORR + MOVK is still preferred for fast
literal generation and readability respectively.

The method devises three candidate values, and checks if both Candidate
and (Imm ^ Candidate) are valid logical immediates. If so, Imm is
materialized with:

```
ORR Xd, XZR, #(Imm ^ Candidate)
EOR Xd, Xd, #(Candidate)
```

The method has been exhaustively tested to ensure it can solve all
possible values (excluding 0, ~0, and plain logical immediates, which
are handled earlier).
---
 llvm/lib/Target/AArch64/AArch64ExpandImm.cpp  | 103 ++++++++++++++++++
 .../AArch64/AArch64ExpandPseudoInsts.cpp      |   1 +
 llvm/test/CodeGen/AArch64/arm64-movi.ll       |   9 ++
 3 files changed, 113 insertions(+)

diff --git a/llvm/lib/Target/AArch64/AArch64ExpandImm.cpp b/llvm/lib/Target/AArch64/AArch64ExpandImm.cpp
index 731972a039ba44c..6bcc4c515e725b4 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandImm.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandImm.cpp
@@ -362,6 +362,105 @@ static bool tryAndOfLogicalImmediates(uint64_t UImm,
   return false;
 }
 
+// Check whether the constant can be represented by exclusive-or of two 64-bit
+// logical immediates. If so, materialize it with an ORR instruction followed
+// by an EOR instruction.
+//
+// This encoding allows all remaining repeated byte patterns, and many repeated
+// 16-bit values, to be encoded without needing four instructions. It can also
+// represent some irregular bitmasks (although those would mostly only need
+// three instructions otherwise).
+static bool tryEorTwoLogicalImm64s(uint64_t Imm,
+                                   SmallVectorImpl<ImmInsnModel> &Insn) {
+  // Determine the larger repetition size of the two possible logical
+  // immediates, by finding the repetition size of Imm.
+  unsigned BigSize = 64;
+
+  do {
+    BigSize /= 2;
+    uint64_t Mask = (1ULL << BigSize) - 1;
+
+    if ((Imm & Mask) != ((Imm >> BigSize) & Mask)) {
+      BigSize *= 2;
+      break;
+    }
+  } while (BigSize > 2);
+
+  uint64_t BigMask = ((uint64_t)-1LL) >> (64 - BigSize);
+
+  // Find the last bit of each run of ones, circularly. For runs which wrap
+  // around from bit 0 to bit 63, this is the bit before the most-significant
+  // zero, otherwise it is the least-significant bit in the run of ones.
+  uint64_t RunStarts = Imm & ~llvm::rotl<uint64_t>(Imm, 1);
+
+  // Find the smaller repetition size of the two possible logical immediates by
+  // counting the number of runs of one-bits within the BigSize-bit value. Both
+  // sizes may be the same. The EOR may add one or subtract one from the
+  // power-of-two count that can be represented by a logical immediate, or it
+  // may be left unchanged.
+  int RunsPerBigChunk = llvm::popcount(RunStarts & BigMask);
+
+  static const int8_t BigToSmallSizeTable[32] = {
+      -1, -1, 0,  1,  2,  2,  -1, 3,  3,  3,  -1, -1, -1, -1, -1, 4,
+      4,  4,  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5,
+  };
+
+  int BigToSmallShift = BigToSmallSizeTable[RunsPerBigChunk];
+
+  // Early-exit if the big chunk couldn't be a power-of-two number of runs
+  // EORed with another single run.
+  if (BigToSmallShift == -1)
+    return false;
+
+  unsigned SmallSize = BigSize >> BigToSmallShift;
+
+  // 64-bit values with a bit set every (1 << index) bits.
+  static const uint64_t RepeatedOnesTable[] = {
+      0xffffffffffffffff, 0x5555555555555555, 0x1111111111111111,
+      0x0101010101010101, 0x0001000100010001, 0x0000000100000001,
+      0x0000000000000001,
+  };
+
+  // This RepeatedOnesTable lookup is a faster implementation of the division
+  // 0xffffffffffffffff / ((1 << SmallSize) - 1), and can be thought of as
+  // dividing the 64-bit value into fields of width SmallSize, and placing a
+  // one in the least significant bit of each field.
+  uint64_t SmallOnes = RepeatedOnesTable[llvm::countr_zero(SmallSize)];
+
+  // Now we try to find the number of ones in each of the smaller repetitions,
+  // by looking at runs of ones in Imm. This can take three attempts, as the
+  // EOR may have changed the length of the first two runs we find.
+
+  // Rotate a run of ones so we can count it in the trailing bits.
+  int Rotation = llvm::countr_zero(RunStarts);
+  uint64_t RotatedImm = llvm::rotr<uint64_t>(Imm, Rotation);
+  for (int Attempt = 0; Attempt < 3; ++Attempt) {
+    unsigned RunLength = llvm::countr_one(RotatedImm);
+
+    // Construct candidate values BigImm and SmallImm, such that if these two
+    // values are encodable, we have a solution. (SmallImm is constructed to be
+    // encodable, but this isn't guaranteed when RunLength >= SmallSize)
+    uint64_t SmallImm =
+        llvm::rotl<uint64_t>((SmallOnes << RunLength) - SmallOnes, Rotation);
+    uint64_t BigImm = Imm ^ SmallImm;
+
+    uint64_t BigEncoding = 0;
+    uint64_t SmallEncoding = 0;
+    if (AArch64_AM::processLogicalImmediate(BigImm, 64, BigEncoding) &&
+        AArch64_AM::processLogicalImmediate(SmallImm, 64, SmallEncoding)) {
+      Insn.push_back({AArch64::ORRXri, 0, SmallEncoding});
+      Insn.push_back({AArch64::EORXri, 1, BigEncoding});
+      return true;
+    }
+
+    // Rotate to the next run of ones
+    Rotation += llvm::countr_zero(llvm::rotr<uint64_t>(RunStarts, Rotation) & ~1);
+    RotatedImm = llvm::rotr<uint64_t>(Imm, Rotation);
+  }
+
+  return false;
+}
+
 /// \brief Expand a MOVi32imm or MOVi64imm pseudo instruction to a
 /// MOVZ or MOVN of width BitSize followed by up to 3 MOVK instructions.
 static inline void expandMOVImmSimple(uint64_t Imm, unsigned BitSize,
@@ -503,6 +602,10 @@ void AArch64_IMM::expandMOVImm(uint64_t Imm, unsigned BitSize,
   if (tryAndOfLogicalImmediates(Imm, Insn))
     return;
 
+  // Attempt to use a sequence of ORR-immediate followed by EOR-immediate.
+  if (BitSize == 64 && tryEorTwoLogicalImm64s(UImm, Insn))
+    return;
+
   // FIXME: Add more two-instruction sequences.
 
   // Three instruction sequences.
diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index cc61373d51d7188..38b5eeaf4057a7c 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -171,6 +171,7 @@ bool AArch64ExpandPseudo::expandMOVImm(MachineBasicBlock &MBB,
       }
       break;
     case AArch64::ANDXri:
+    case AArch64::EORXri:
       if (I->Op1 == 0) {
         MIBS.push_back(BuildMI(MBB, MBBI, MI.getDebugLoc(), TII->get(I->Opcode))
                            .add(MI.getOperand(0))
diff --git a/llvm/test/CodeGen/AArch64/arm64-movi.ll b/llvm/test/CodeGen/AArch64/arm64-movi.ll
index 2ec58dbee02325a..1d88f1ee2e7fb04 100644
--- a/llvm/test/CodeGen/AArch64/arm64-movi.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-movi.ll
@@ -432,3 +432,12 @@ define i64 @orr_64_orr_8() nounwind {
 ; CHECK-NEXT:    ret
   ret i64 -5764607889538110806
 }
+
+define i64 @orr_eor_2_8() nounwind {
+; CHECK-LABEL: orr_eor_2_8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov x0, #3689348814741910323
+; CHECK-NEXT:    eor x0, x0, #0x606060606060606
+; CHECK-NEXT:    ret
+  ret i64 3834029160418063669
+}



More information about the llvm-commits mailing list