[llvm] 376f414 - AArch64: Look through copies in CCMP converter.

via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 16 21:20:11 PDT 2026


Author: Peter Collingbourne
Date: 2026-03-16T21:20:06-07:00
New Revision: 376f41439375da3aa1ac095086abe0d1a4185c4a

URL: https://github.com/llvm/llvm-project/commit/376f41439375da3aa1ac095086abe0d1a4185c4a
DIFF: https://github.com/llvm/llvm-project/commit/376f41439375da3aa1ac095086abe0d1a4185c4a.diff

LOG: AArch64: Look through copies in CCMP converter.

The usual IR pattern for llvm.ptrauth.auth involves creating an
inttoptr for the auth operation to convert the result into a pointer.
CodeGenPrepare will copy these inttoptr operations into the user basic
blocks, as this generally results in more efficient code. However, it
pessimizes the CCMP converter, which encounters the COPY created for
the inttoptr and gives up on the optimization. Fix this by looking
through copies in the CCMP converter pass.

Assisted-by: gemini (wrote test)

Reviewers: davemgreen, fmayer, atrosinenko

Reviewed By: fmayer

Pull Request: https://github.com/llvm/llvm-project/pull/186842

Added: 
    llvm/test/CodeGen/AArch64/ccmp-look-through-copy.mir

Modified: 
    llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
index 058cae92de45b..06a88ba3da8dd 100644
--- a/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ConditionalCompares.cpp
@@ -209,6 +209,17 @@ class SSACCmpConv {
 };
 } // end anonymous namespace
 
+static Register lookThroughCopies(Register Reg, MachineRegisterInfo *MRI) {
+  MachineInstr *MI;
+  while ((MI = MRI->getUniqueVRegDef(Reg)) &&
+         MI->getOpcode() == TargetOpcode::COPY) {
+    if (MI->getOperand(1).getReg().isPhysical())
+      break;
+    Reg = MI->getOperand(1).getReg();
+  }
+  return Reg;
+}
+
 // Check that all PHIs in Tail are selecting the same value from Head and CmpBB.
 // This means that no if-conversion is required when merging CmpBB into Head.
 bool SSACCmpConv::trivialTailPHIs() {
@@ -219,7 +230,7 @@ bool SSACCmpConv::trivialTailPHIs() {
     // PHI operands come in (VReg, MBB) pairs.
     for (unsigned oi = 1, oe = I.getNumOperands(); oi != oe; oi += 2) {
       MachineBasicBlock *MBB = I.getOperand(oi + 1).getMBB();
-      Register Reg = I.getOperand(oi).getReg();
+      Register Reg = lookThroughCopies(I.getOperand(oi).getReg(), MRI);
       if (MBB == Head) {
         assert((!HeadReg || HeadReg == Reg) && "Inconsistent PHI operands");
         HeadReg = Reg;

diff --git a/llvm/test/CodeGen/AArch64/ccmp-look-through-copy.mir b/llvm/test/CodeGen/AArch64/ccmp-look-through-copy.mir
new file mode 100644
index 0000000000000..8c0017ae1343c
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/ccmp-look-through-copy.mir
@@ -0,0 +1,45 @@
+# RUN: llc -o - %s -mtriple=aarch64 -run-pass=aarch64-ccmp -aarch64-stress-ccmp | FileCheck %s
+---
+name: ccmp-look-through-copy
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: gpr64 }
+  - { id: 1, class: gpr64 }
+  - { id: 4, class: gpr64 }
+  - { id: 6, class: gpr64 }
+  - { id: 7, class: gpr64 }
+  - { id: 8, class: gpr64 }
+  - { id: 9, class: gpr64 }
+body: |
+  bb.0:
+    successors: %bb.1(0x40000000), %bb.2(0x40000000)
+    liveins: $x0, $x1
+  
+    ; CHECK-LABEL: name: ccmp-look-through-copy
+    ; CHECK: bb.0:
+    ; CHECK: CCMPXr
+
+    %0:gpr64 = COPY $x0
+    %1:gpr64 = COPY $x1
+    %4:gpr64 = COPY %0
+    %7:gpr64 = COPY %4
+    %6:gpr64 = SUBSXrr %0, %1, implicit-def $nzcv
+    Bcc 11, %bb.2, implicit $nzcv
+    B %bb.1
+  
+  bb.1:
+    successors: %bb.2(0x40000000), %bb.3(0x40000000)
+  
+    %8:gpr64 = SUBSXrr %1, %0, implicit-def $nzcv
+    Bcc 12, %bb.2, implicit $nzcv
+    B %bb.3
+  
+  bb.2:
+    %9:gpr64 = PHI %4, %bb.0, %7, %bb.1
+    $x0 = COPY %9
+    RET_ReallyLR implicit $x0
+  
+  bb.3:
+    $x0 = COPY %0
+    RET_ReallyLR implicit $x0
+...


        


More information about the llvm-commits mailing list