[llvm-branch-commits] [llvm] [AArch64][PAC] Skip llvm.ptrauth.blend intrinsic in GVN PRE (PR #147815)
Anatoly Trosinenko via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Jul 22 04:27:48 PDT 2025
https://github.com/atrosinenko updated https://github.com/llvm/llvm-project/pull/147815
>From 37c2dda58d8f6e82113b92ddc2e499b3c5a50385 Mon Sep 17 00:00:00 2001
From: Anatoly Trosinenko <atrosinenko at accesssoftek.com>
Date: Wed, 9 Jul 2025 21:28:09 +0300
Subject: [PATCH 1/2] [AArch64][PAC] Precommit tests for handling ptrauth.blend
in GVN
---
.../ptrauth-discriminator-components.ll | 173 ++++++++++++++++++
1 file changed, 173 insertions(+)
create mode 100644 llvm/test/CodeGen/AArch64/ptrauth-discriminator-components.ll
diff --git a/llvm/test/CodeGen/AArch64/ptrauth-discriminator-components.ll b/llvm/test/CodeGen/AArch64/ptrauth-discriminator-components.ll
new file mode 100644
index 0000000000000..efa2123807515
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/ptrauth-discriminator-components.ll
@@ -0,0 +1,173 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -mtriple aarch64 -S -passes=gvn | FileCheck --check-prefix=GVN %s
+; RUN: opt < %s -mtriple aarch64 -S -passes='gvn,simplifycfg<hoist-common-insts>' | FileCheck --check-prefix=GVN-SCFG %s
+
+; When processing ptrauth.* intrinsics accepting a discriminator operand
+; on AArch64, the instruction selector tries to detect a common pattern of
+; the discriminator value being computed by a call to `blend(addr, imm)`.
+; In such case, a pseudo instruction is generated with `addr` and `imm` as
+; separate operands, which is not expanded until AsmPrinter. This way, it is
+; possible to enforce the immediate modifier, even if an attacker is able to
+; substitute the address modifier.
+;
+; While it should be more robust to use two separate arguments per discriminator
+; in any relevant intrinsic, a best-effort matching is currently performed by
+; the instruction selector. For that reason, it is important not to introduce
+; PHI nodes hiding the results of multiple identical blend operations.
+
+; In test_simple, four different signed values are stored into memory, but
+; the discriminators are pairwise equal and thus could be moved by GVN's
+; partial redundancy elimination.
+define void @test_simple(i1 %cond, ptr %storage1, ptr %storage2, i64 %a, i64 %b) {
+; GVN-LABEL: define void @test_simple(
+; GVN-SAME: i1 [[COND:%.*]], ptr [[STORAGE1:%.*]], ptr [[STORAGE2:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) {
+; GVN-NEXT: [[ENTRY:.*:]]
+; GVN-NEXT: [[STORAGE1_I:%.*]] = ptrtoint ptr [[STORAGE1]] to i64
+; GVN-NEXT: [[STORAGE2_I:%.*]] = ptrtoint ptr [[STORAGE2]] to i64
+; GVN-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[ENTRY_EXIT_CRIT_EDGE:.*]]
+; GVN: [[ENTRY_EXIT_CRIT_EDGE]]:
+; GVN-NEXT: [[DOTPRE:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE1_I]], i64 42)
+; GVN-NEXT: [[DOTPRE1:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE2_I]], i64 42)
+; GVN-NEXT: br label %[[EXIT:.*]]
+; GVN: [[IF_THEN]]:
+; GVN-NEXT: [[DISCR1_THEN:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE1_I]], i64 42)
+; GVN-NEXT: [[DISCR2_THEN:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE2_I]], i64 42)
+; GVN-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[A]], i32 2, i64 [[DISCR1_THEN]])
+; GVN-NEXT: [[T2:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[A]], i32 2, i64 [[DISCR2_THEN]])
+; GVN-NEXT: store volatile i64 [[T1]], ptr [[STORAGE1]], align 8
+; GVN-NEXT: store volatile i64 [[T2]], ptr [[STORAGE2]], align 8
+; GVN-NEXT: br label %[[EXIT]]
+; GVN: [[EXIT]]:
+; GVN-NEXT: [[DISCR2_EXIT_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE1]], %[[ENTRY_EXIT_CRIT_EDGE]] ], [ [[DISCR2_THEN]], %[[IF_THEN]] ]
+; GVN-NEXT: [[DISCR1_EXIT_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE]], %[[ENTRY_EXIT_CRIT_EDGE]] ], [ [[DISCR1_THEN]], %[[IF_THEN]] ]
+; GVN-NEXT: [[T3:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR1_EXIT_PRE_PHI]])
+; GVN-NEXT: [[T4:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR2_EXIT_PRE_PHI]])
+; GVN-NEXT: store volatile i64 [[T3]], ptr [[STORAGE1]], align 8
+; GVN-NEXT: store volatile i64 [[T4]], ptr [[STORAGE2]], align 8
+; GVN-NEXT: ret void
+;
+; GVN-SCFG-LABEL: define void @test_simple(
+; GVN-SCFG-SAME: i1 [[COND:%.*]], ptr [[STORAGE1:%.*]], ptr [[STORAGE2:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) {
+; GVN-SCFG-NEXT: [[ENTRY:.*:]]
+; GVN-SCFG-NEXT: [[STORAGE1_I:%.*]] = ptrtoint ptr [[STORAGE1]] to i64
+; GVN-SCFG-NEXT: [[STORAGE2_I:%.*]] = ptrtoint ptr [[STORAGE2]] to i64
+; GVN-SCFG-NEXT: [[DISCR1_THEN:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE1_I]], i64 42)
+; GVN-SCFG-NEXT: [[DISCR2_THEN:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE2_I]], i64 42)
+; GVN-SCFG-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[EXIT:.*]]
+; GVN-SCFG: [[IF_THEN]]:
+; GVN-SCFG-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[A]], i32 2, i64 [[DISCR1_THEN]])
+; GVN-SCFG-NEXT: [[T2:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[A]], i32 2, i64 [[DISCR2_THEN]])
+; GVN-SCFG-NEXT: store volatile i64 [[T1]], ptr [[STORAGE1]], align 8
+; GVN-SCFG-NEXT: store volatile i64 [[T2]], ptr [[STORAGE2]], align 8
+; GVN-SCFG-NEXT: br label %[[EXIT]]
+; GVN-SCFG: [[EXIT]]:
+; GVN-SCFG-NEXT: [[T3:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR1_THEN]])
+; GVN-SCFG-NEXT: [[T4:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR2_THEN]])
+; GVN-SCFG-NEXT: store volatile i64 [[T3]], ptr [[STORAGE1]], align 8
+; GVN-SCFG-NEXT: store volatile i64 [[T4]], ptr [[STORAGE2]], align 8
+; GVN-SCFG-NEXT: ret void
+;
+entry:
+ %storage1.i = ptrtoint ptr %storage1 to i64
+ %storage2.i = ptrtoint ptr %storage2 to i64
+ br i1 %cond, label %if.then, label %exit
+
+if.then:
+ %discr1.then = call i64 @llvm.ptrauth.blend(i64 %storage1.i, i64 42)
+ %discr2.then = call i64 @llvm.ptrauth.blend(i64 %storage2.i, i64 42)
+ %t1 = call i64 @llvm.ptrauth.sign(i64 %a, i32 2, i64 %discr1.then)
+ %t2 = call i64 @llvm.ptrauth.sign(i64 %a, i32 2, i64 %discr2.then)
+ store volatile i64 %t1, ptr %storage1
+ store volatile i64 %t2, ptr %storage2
+ br label %exit
+
+exit:
+ %discr1.exit = call i64 @llvm.ptrauth.blend(i64 %storage1.i, i64 42)
+ %discr2.exit = call i64 @llvm.ptrauth.blend(i64 %storage2.i, i64 42)
+ %t3 = call i64 @llvm.ptrauth.sign(i64 %b, i32 2, i64 %discr1.exit)
+ %t4 = call i64 @llvm.ptrauth.sign(i64 %b, i32 2, i64 %discr2.exit)
+ store volatile i64 %t3, ptr %storage1
+ store volatile i64 %t4, ptr %storage2
+ ret void
+}
+
+; test_interleaved is similar to test_simple, but interleaving blend and sign
+; operations makes it harder for SimplifyCFG pass to hoist blends into the
+; entry basic block later and thus eliminate PHI nodes.
+define void @test_interleaved(i1 %cond, ptr %storage1, ptr %storage2, i64 %a, i64 %b) {
+; GVN-LABEL: define void @test_interleaved(
+; GVN-SAME: i1 [[COND:%.*]], ptr [[STORAGE1:%.*]], ptr [[STORAGE2:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) {
+; GVN-NEXT: [[ENTRY:.*:]]
+; GVN-NEXT: [[STORAGE1_I:%.*]] = ptrtoint ptr [[STORAGE1]] to i64
+; GVN-NEXT: [[STORAGE2_I:%.*]] = ptrtoint ptr [[STORAGE2]] to i64
+; GVN-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[ENTRY_EXIT_CRIT_EDGE:.*]]
+; GVN: [[ENTRY_EXIT_CRIT_EDGE]]:
+; GVN-NEXT: [[DOTPRE:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE1_I]], i64 42)
+; GVN-NEXT: [[DOTPRE1:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE2_I]], i64 42)
+; GVN-NEXT: br label %[[EXIT:.*]]
+; GVN: [[IF_THEN]]:
+; GVN-NEXT: [[DISCR1_THEN:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE1_I]], i64 42)
+; GVN-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[A]], i32 2, i64 [[DISCR1_THEN]])
+; GVN-NEXT: [[DISCR2_THEN:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE2_I]], i64 42)
+; GVN-NEXT: [[T2:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[A]], i32 2, i64 [[DISCR2_THEN]])
+; GVN-NEXT: store volatile i64 [[T1]], ptr [[STORAGE1]], align 8
+; GVN-NEXT: store volatile i64 [[T2]], ptr [[STORAGE2]], align 8
+; GVN-NEXT: br label %[[EXIT]]
+; GVN: [[EXIT]]:
+; GVN-NEXT: [[DISCR2_EXIT_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE1]], %[[ENTRY_EXIT_CRIT_EDGE]] ], [ [[DISCR2_THEN]], %[[IF_THEN]] ]
+; GVN-NEXT: [[DISCR1_EXIT_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE]], %[[ENTRY_EXIT_CRIT_EDGE]] ], [ [[DISCR1_THEN]], %[[IF_THEN]] ]
+; GVN-NEXT: [[T3:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR1_EXIT_PRE_PHI]])
+; GVN-NEXT: [[T4:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR2_EXIT_PRE_PHI]])
+; GVN-NEXT: store volatile i64 [[T3]], ptr [[STORAGE1]], align 8
+; GVN-NEXT: store volatile i64 [[T4]], ptr [[STORAGE2]], align 8
+; GVN-NEXT: ret void
+;
+; GVN-SCFG-LABEL: define void @test_interleaved(
+; GVN-SCFG-SAME: i1 [[COND:%.*]], ptr [[STORAGE1:%.*]], ptr [[STORAGE2:%.*]], i64 [[A:%.*]], i64 [[B:%.*]]) {
+; GVN-SCFG-NEXT: [[ENTRY:.*:]]
+; GVN-SCFG-NEXT: [[STORAGE1_I:%.*]] = ptrtoint ptr [[STORAGE1]] to i64
+; GVN-SCFG-NEXT: [[STORAGE2_I:%.*]] = ptrtoint ptr [[STORAGE2]] to i64
+; GVN-SCFG-NEXT: [[DISCR1_THEN:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE1_I]], i64 42)
+; GVN-SCFG-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[ENTRY_EXIT_CRIT_EDGE:.*]]
+; GVN-SCFG: [[ENTRY_EXIT_CRIT_EDGE]]:
+; GVN-SCFG-NEXT: [[DOTPRE1:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE2_I]], i64 42)
+; GVN-SCFG-NEXT: br label %[[EXIT:.*]]
+; GVN-SCFG: [[IF_THEN]]:
+; GVN-SCFG-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[A]], i32 2, i64 [[DISCR1_THEN]])
+; GVN-SCFG-NEXT: [[DISCR2_THEN:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE2_I]], i64 42)
+; GVN-SCFG-NEXT: [[T2:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[A]], i32 2, i64 [[DISCR2_THEN]])
+; GVN-SCFG-NEXT: store volatile i64 [[T1]], ptr [[STORAGE1]], align 8
+; GVN-SCFG-NEXT: store volatile i64 [[T2]], ptr [[STORAGE2]], align 8
+; GVN-SCFG-NEXT: br label %[[EXIT]]
+; GVN-SCFG: [[EXIT]]:
+; GVN-SCFG-NEXT: [[DISCR2_EXIT_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE1]], %[[ENTRY_EXIT_CRIT_EDGE]] ], [ [[DISCR2_THEN]], %[[IF_THEN]] ]
+; GVN-SCFG-NEXT: [[DISCR1_EXIT_PRE_PHI:%.*]] = phi i64 [ [[DISCR1_THEN]], %[[ENTRY_EXIT_CRIT_EDGE]] ], [ [[DISCR1_THEN]], %[[IF_THEN]] ]
+; GVN-SCFG-NEXT: [[T3:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR1_EXIT_PRE_PHI]])
+; GVN-SCFG-NEXT: [[T4:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR2_EXIT_PRE_PHI]])
+; GVN-SCFG-NEXT: store volatile i64 [[T3]], ptr [[STORAGE1]], align 8
+; GVN-SCFG-NEXT: store volatile i64 [[T4]], ptr [[STORAGE2]], align 8
+; GVN-SCFG-NEXT: ret void
+;
+entry:
+ %storage1.i = ptrtoint ptr %storage1 to i64
+ %storage2.i = ptrtoint ptr %storage2 to i64
+ br i1 %cond, label %if.then, label %exit
+
+if.then:
+ %discr1.then = call i64 @llvm.ptrauth.blend(i64 %storage1.i, i64 42)
+ %t1 = call i64 @llvm.ptrauth.sign(i64 %a, i32 2, i64 %discr1.then)
+ %discr2.then = call i64 @llvm.ptrauth.blend(i64 %storage2.i, i64 42)
+ %t2 = call i64 @llvm.ptrauth.sign(i64 %a, i32 2, i64 %discr2.then)
+ store volatile i64 %t1, ptr %storage1
+ store volatile i64 %t2, ptr %storage2
+ br label %exit
+
+exit:
+ %discr1.exit = call i64 @llvm.ptrauth.blend(i64 %storage1.i, i64 42)
+ %t3 = call i64 @llvm.ptrauth.sign(i64 %b, i32 2, i64 %discr1.exit)
+ %discr2.exit = call i64 @llvm.ptrauth.blend(i64 %storage2.i, i64 42)
+ %t4 = call i64 @llvm.ptrauth.sign(i64 %b, i32 2, i64 %discr2.exit)
+ store volatile i64 %t3, ptr %storage1
+ store volatile i64 %t4, ptr %storage2
+ ret void
+}
>From beee6727fcaf18520ef0f0c035d9d3532bb3a96f Mon Sep 17 00:00:00 2001
From: Anatoly Trosinenko <atrosinenko at accesssoftek.com>
Date: Tue, 8 Jul 2025 23:51:10 +0300
Subject: [PATCH 2/2] [AArch64][PAC] Skip llvm.ptrauth.blend intrinsic in GVN
PRE
The instruction selector on AArch64 implements a best-effort heuristic
to detect the discriminator being computed by the llvm.ptrauth.blend
intrinsic. If such a pattern is detected, then the address and immediate
discriminator components are emitted as two separate operands of the
corresponding pseudo instruction, which is not expanded until
AsmPrinter. This helps enforce the hard-coded immediate modifier even
when the address part of the discriminator can be modified by an
attacker, something along the lines of:
mov x8, x20
movk x8, #1234, #48
pacda x0, x8
// ...
bl callee
mov x8, x20 // address in x20 can be modified
movk x8, #1234, #48 // immediate modifier is enforced
pacda x0, x8
instead of reloading a previously computed discriminator value from the
stack (which can be modified by an attacker under the Pointer
Authentication threat model) or keeping it in a callee-saved register
to the stack in callee):
movk x20, #1234, #48
pacda x0, x20
// ...
bl callee
pacda x0, x20 // the entire discriminator can be modified
---
llvm/lib/Transforms/Scalar/GVN.cpp | 7 +++
.../ptrauth-discriminator-components.ll | 53 ++++++++-----------
2 files changed, 29 insertions(+), 31 deletions(-)
diff --git a/llvm/lib/Transforms/Scalar/GVN.cpp b/llvm/lib/Transforms/Scalar/GVN.cpp
index f6bf09d09433d..d306342225b5c 100644
--- a/llvm/lib/Transforms/Scalar/GVN.cpp
+++ b/llvm/lib/Transforms/Scalar/GVN.cpp
@@ -2993,6 +2993,13 @@ bool GVNPass::performScalarPRE(Instruction *CurInst) {
if (isa<GetElementPtrInst>(CurInst))
return false;
+ // Don't do PRE on ptrauth_blend intrinsic: on AArch64 the instruction
+ // selector wants to take its operands into account when selecting the user
+ // of the blended discriminator, so don't hide the blend behind PHI nodes.
+ if (auto *II = dyn_cast<IntrinsicInst>(CurInst))
+ if (II->getIntrinsicID() == Intrinsic::ptrauth_blend)
+ return false;
+
if (auto *CallB = dyn_cast<CallBase>(CurInst)) {
// We don't currently value number ANY inline asm calls.
if (CallB->isInlineAsm())
diff --git a/llvm/test/CodeGen/AArch64/ptrauth-discriminator-components.ll b/llvm/test/CodeGen/AArch64/ptrauth-discriminator-components.ll
index efa2123807515..1987f58b27c72 100644
--- a/llvm/test/CodeGen/AArch64/ptrauth-discriminator-components.ll
+++ b/llvm/test/CodeGen/AArch64/ptrauth-discriminator-components.ll
@@ -24,11 +24,7 @@ define void @test_simple(i1 %cond, ptr %storage1, ptr %storage2, i64 %a, i64 %b)
; GVN-NEXT: [[ENTRY:.*:]]
; GVN-NEXT: [[STORAGE1_I:%.*]] = ptrtoint ptr [[STORAGE1]] to i64
; GVN-NEXT: [[STORAGE2_I:%.*]] = ptrtoint ptr [[STORAGE2]] to i64
-; GVN-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[ENTRY_EXIT_CRIT_EDGE:.*]]
-; GVN: [[ENTRY_EXIT_CRIT_EDGE]]:
-; GVN-NEXT: [[DOTPRE:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE1_I]], i64 42)
-; GVN-NEXT: [[DOTPRE1:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE2_I]], i64 42)
-; GVN-NEXT: br label %[[EXIT:.*]]
+; GVN-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[EXIT:.*]]
; GVN: [[IF_THEN]]:
; GVN-NEXT: [[DISCR1_THEN:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE1_I]], i64 42)
; GVN-NEXT: [[DISCR2_THEN:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE2_I]], i64 42)
@@ -38,10 +34,10 @@ define void @test_simple(i1 %cond, ptr %storage1, ptr %storage2, i64 %a, i64 %b)
; GVN-NEXT: store volatile i64 [[T2]], ptr [[STORAGE2]], align 8
; GVN-NEXT: br label %[[EXIT]]
; GVN: [[EXIT]]:
-; GVN-NEXT: [[DISCR2_EXIT_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE1]], %[[ENTRY_EXIT_CRIT_EDGE]] ], [ [[DISCR2_THEN]], %[[IF_THEN]] ]
-; GVN-NEXT: [[DISCR1_EXIT_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE]], %[[ENTRY_EXIT_CRIT_EDGE]] ], [ [[DISCR1_THEN]], %[[IF_THEN]] ]
-; GVN-NEXT: [[T3:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR1_EXIT_PRE_PHI]])
-; GVN-NEXT: [[T4:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR2_EXIT_PRE_PHI]])
+; GVN-NEXT: [[DISCR1_EXIT:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE1_I]], i64 42)
+; GVN-NEXT: [[DISCR2_EXIT:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE2_I]], i64 42)
+; GVN-NEXT: [[T3:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR1_EXIT]])
+; GVN-NEXT: [[T4:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR2_EXIT]])
; GVN-NEXT: store volatile i64 [[T3]], ptr [[STORAGE1]], align 8
; GVN-NEXT: store volatile i64 [[T4]], ptr [[STORAGE2]], align 8
; GVN-NEXT: ret void
@@ -51,18 +47,20 @@ define void @test_simple(i1 %cond, ptr %storage1, ptr %storage2, i64 %a, i64 %b)
; GVN-SCFG-NEXT: [[ENTRY:.*:]]
; GVN-SCFG-NEXT: [[STORAGE1_I:%.*]] = ptrtoint ptr [[STORAGE1]] to i64
; GVN-SCFG-NEXT: [[STORAGE2_I:%.*]] = ptrtoint ptr [[STORAGE2]] to i64
-; GVN-SCFG-NEXT: [[DISCR1_THEN:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE1_I]], i64 42)
-; GVN-SCFG-NEXT: [[DISCR2_THEN:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE2_I]], i64 42)
; GVN-SCFG-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[EXIT:.*]]
; GVN-SCFG: [[IF_THEN]]:
+; GVN-SCFG-NEXT: [[DISCR1_THEN:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE1_I]], i64 42)
+; GVN-SCFG-NEXT: [[DISCR2_THEN:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE2_I]], i64 42)
; GVN-SCFG-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[A]], i32 2, i64 [[DISCR1_THEN]])
; GVN-SCFG-NEXT: [[T2:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[A]], i32 2, i64 [[DISCR2_THEN]])
; GVN-SCFG-NEXT: store volatile i64 [[T1]], ptr [[STORAGE1]], align 8
; GVN-SCFG-NEXT: store volatile i64 [[T2]], ptr [[STORAGE2]], align 8
; GVN-SCFG-NEXT: br label %[[EXIT]]
; GVN-SCFG: [[EXIT]]:
-; GVN-SCFG-NEXT: [[T3:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR1_THEN]])
-; GVN-SCFG-NEXT: [[T4:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR2_THEN]])
+; GVN-SCFG-NEXT: [[DISCR1_EXIT:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE1_I]], i64 42)
+; GVN-SCFG-NEXT: [[DISCR2_EXIT:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE2_I]], i64 42)
+; GVN-SCFG-NEXT: [[T3:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR1_EXIT]])
+; GVN-SCFG-NEXT: [[T4:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR2_EXIT]])
; GVN-SCFG-NEXT: store volatile i64 [[T3]], ptr [[STORAGE1]], align 8
; GVN-SCFG-NEXT: store volatile i64 [[T4]], ptr [[STORAGE2]], align 8
; GVN-SCFG-NEXT: ret void
@@ -100,11 +98,7 @@ define void @test_interleaved(i1 %cond, ptr %storage1, ptr %storage2, i64 %a, i6
; GVN-NEXT: [[ENTRY:.*:]]
; GVN-NEXT: [[STORAGE1_I:%.*]] = ptrtoint ptr [[STORAGE1]] to i64
; GVN-NEXT: [[STORAGE2_I:%.*]] = ptrtoint ptr [[STORAGE2]] to i64
-; GVN-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[ENTRY_EXIT_CRIT_EDGE:.*]]
-; GVN: [[ENTRY_EXIT_CRIT_EDGE]]:
-; GVN-NEXT: [[DOTPRE:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE1_I]], i64 42)
-; GVN-NEXT: [[DOTPRE1:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE2_I]], i64 42)
-; GVN-NEXT: br label %[[EXIT:.*]]
+; GVN-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[EXIT:.*]]
; GVN: [[IF_THEN]]:
; GVN-NEXT: [[DISCR1_THEN:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE1_I]], i64 42)
; GVN-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[A]], i32 2, i64 [[DISCR1_THEN]])
@@ -114,10 +108,10 @@ define void @test_interleaved(i1 %cond, ptr %storage1, ptr %storage2, i64 %a, i6
; GVN-NEXT: store volatile i64 [[T2]], ptr [[STORAGE2]], align 8
; GVN-NEXT: br label %[[EXIT]]
; GVN: [[EXIT]]:
-; GVN-NEXT: [[DISCR2_EXIT_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE1]], %[[ENTRY_EXIT_CRIT_EDGE]] ], [ [[DISCR2_THEN]], %[[IF_THEN]] ]
-; GVN-NEXT: [[DISCR1_EXIT_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE]], %[[ENTRY_EXIT_CRIT_EDGE]] ], [ [[DISCR1_THEN]], %[[IF_THEN]] ]
-; GVN-NEXT: [[T3:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR1_EXIT_PRE_PHI]])
-; GVN-NEXT: [[T4:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR2_EXIT_PRE_PHI]])
+; GVN-NEXT: [[DISCR1_EXIT:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE1_I]], i64 42)
+; GVN-NEXT: [[T3:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR1_EXIT]])
+; GVN-NEXT: [[DISCR2_EXIT:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE2_I]], i64 42)
+; GVN-NEXT: [[T4:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR2_EXIT]])
; GVN-NEXT: store volatile i64 [[T3]], ptr [[STORAGE1]], align 8
; GVN-NEXT: store volatile i64 [[T4]], ptr [[STORAGE2]], align 8
; GVN-NEXT: ret void
@@ -127,12 +121,9 @@ define void @test_interleaved(i1 %cond, ptr %storage1, ptr %storage2, i64 %a, i6
; GVN-SCFG-NEXT: [[ENTRY:.*:]]
; GVN-SCFG-NEXT: [[STORAGE1_I:%.*]] = ptrtoint ptr [[STORAGE1]] to i64
; GVN-SCFG-NEXT: [[STORAGE2_I:%.*]] = ptrtoint ptr [[STORAGE2]] to i64
-; GVN-SCFG-NEXT: [[DISCR1_THEN:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE1_I]], i64 42)
-; GVN-SCFG-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[ENTRY_EXIT_CRIT_EDGE:.*]]
-; GVN-SCFG: [[ENTRY_EXIT_CRIT_EDGE]]:
-; GVN-SCFG-NEXT: [[DOTPRE1:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE2_I]], i64 42)
-; GVN-SCFG-NEXT: br label %[[EXIT:.*]]
+; GVN-SCFG-NEXT: br i1 [[COND]], label %[[IF_THEN:.*]], label %[[EXIT:.*]]
; GVN-SCFG: [[IF_THEN]]:
+; GVN-SCFG-NEXT: [[DISCR1_THEN:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE1_I]], i64 42)
; GVN-SCFG-NEXT: [[T1:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[A]], i32 2, i64 [[DISCR1_THEN]])
; GVN-SCFG-NEXT: [[DISCR2_THEN:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE2_I]], i64 42)
; GVN-SCFG-NEXT: [[T2:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[A]], i32 2, i64 [[DISCR2_THEN]])
@@ -140,10 +131,10 @@ define void @test_interleaved(i1 %cond, ptr %storage1, ptr %storage2, i64 %a, i6
; GVN-SCFG-NEXT: store volatile i64 [[T2]], ptr [[STORAGE2]], align 8
; GVN-SCFG-NEXT: br label %[[EXIT]]
; GVN-SCFG: [[EXIT]]:
-; GVN-SCFG-NEXT: [[DISCR2_EXIT_PRE_PHI:%.*]] = phi i64 [ [[DOTPRE1]], %[[ENTRY_EXIT_CRIT_EDGE]] ], [ [[DISCR2_THEN]], %[[IF_THEN]] ]
-; GVN-SCFG-NEXT: [[DISCR1_EXIT_PRE_PHI:%.*]] = phi i64 [ [[DISCR1_THEN]], %[[ENTRY_EXIT_CRIT_EDGE]] ], [ [[DISCR1_THEN]], %[[IF_THEN]] ]
-; GVN-SCFG-NEXT: [[T3:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR1_EXIT_PRE_PHI]])
-; GVN-SCFG-NEXT: [[T4:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR2_EXIT_PRE_PHI]])
+; GVN-SCFG-NEXT: [[DISCR1_EXIT:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE1_I]], i64 42)
+; GVN-SCFG-NEXT: [[T3:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR1_EXIT]])
+; GVN-SCFG-NEXT: [[DISCR2_EXIT:%.*]] = call i64 @llvm.ptrauth.blend(i64 [[STORAGE2_I]], i64 42)
+; GVN-SCFG-NEXT: [[T4:%.*]] = call i64 @llvm.ptrauth.sign(i64 [[B]], i32 2, i64 [[DISCR2_EXIT]])
; GVN-SCFG-NEXT: store volatile i64 [[T3]], ptr [[STORAGE1]], align 8
; GVN-SCFG-NEXT: store volatile i64 [[T4]], ptr [[STORAGE2]], align 8
; GVN-SCFG-NEXT: ret void
More information about the llvm-branch-commits
mailing list