[llvm] [PAC][InstCombine] Replace auth+sign with resign (PR #130807)
Anatoly Trosinenko via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 1 09:06:05 PDT 2025
atrosinenko wrote:
Ping.
Considering the above comments, I see several independent questions:
* Is InstCombine pass the right place to fix the issue (possibility of unsafe pointer re-signing due to spilling intermediate authenticated value)?
- if it is, should a warning be emitted about the frontend producing unsafe LLVM IR?
- if it is not, is it the right place for a temporary workaround?
* As with any PR: in case of InstCombine pass being the right place, is the particular implementation good enough?
---
Note on the above example of seemingly missed transformation:
```c
void* f3(void *p) {
void* authed = __builtin_ptrauth_auth(p, 2, 1234);
__asm__(""::"m"(authed));
return __builtin_ptrauth_sign_unauthenticated(authed, 3, 42);
}
```
It is the `"m"` input operand constraint that prevents instruction combining. I'm not very familiar with the precise semantics of this constraint and whether it is absolutely necessary for the frontend to emit it like this
```llvm
store ptr %2, ptr %authed, align 8, !tbaa !9
call void asm sideeffect "", "*m"(ptr nonnull elementtype(ptr) %authed) #4, !srcloc !13
%3 = load ptr, ptr %authed, align 8, !tbaa !9
```
For me, this looks like passing a pointer as input to the asm snippet, but with the pointee being in-out. If I use the `"r"` constraint (and mark this inline asm as `volatile`) merely to force computation of the intermediate value, then instruction combining occurs as expected.
<details>
Updated source:
```c
void* f3(void *p) {
void* authed = __builtin_ptrauth_auth(p, 2, 1234);
__asm__ volatile(""::"r"(authed));
return __builtin_ptrauth_sign_unauthenticated(authed, 3, 42);
}
```
LLVM IR before the first `instcombine` pass (at `-O2` optimization level):
```llvm
define dso_local ptr @f3(ptr noundef %p) local_unnamed_addr #0 {
entry:
%0 = ptrtoint ptr %p to i64
%1 = call i64 @llvm.ptrauth.auth(i64 %0, i32 2, i64 1234)
%2 = inttoptr i64 %1 to ptr
call void asm sideeffect "", "r"(ptr %2) #3, !srcloc !9
%3 = call i64 @llvm.ptrauth.sign(i64 %1, i32 3, i64 42)
%4 = inttoptr i64 %3 to ptr
ret ptr %4
}
```
LLVM IR after the first `instcombine` pass:
```llvm
define dso_local ptr @f3(ptr noundef %p) local_unnamed_addr #0 {
entry:
%0 = ptrtoint ptr %p to i64
%1 = call i64 @llvm.ptrauth.auth(i64 %0, i32 2, i64 1234)
%2 = inttoptr i64 %1 to ptr
call void asm sideeffect "", "r"(ptr %2) #4, !srcloc !9
%3 = call i64 @llvm.ptrauth.resign(i64 %0, i32 2, i64 1234, i32 3, i64 42)
%4 = inttoptr i64 %3 to ptr
ret ptr %4
}
```
MIR right before AsmPrinter:
```
bb.0.entry:
liveins: $x0
$x16 = ORRXrs $xzr, $x0, 0
AUT 2, 1234, $xzr, implicit-def $x16, implicit-def dead $x17, implicit-def dead $nzcv, implicit killed $x16
$x8 = ORRXrs $xzr, killed $x16, 0
$x16 = ORRXrs $xzr, killed $x0, 0
INLINEASM &"" [sideeffect] [attdialect], $0:[reguse:GPR64common], killed renamable $x8, !9
AUTPAC 2, 1234, $xzr, 3, 42, $xzr, implicit-def $x16, implicit-def dead $x17, implicit-def dead $nzcv, implicit killed $x16
$x0 = ORRXrs $xzr, killed $x16, 0
RET undef $lr, implicit killed $x0
```
Final result:
```
f3: // @f3
.cfi_startproc
// %bb.0: // %entry
mov x16, x0
mov x17, #1234 // =0x4d2
autda x16, x17
mov x17, x16
xpacd x17
cmp x16, x17
b.eq .Lauth_success_0
brk #0xc472
.Lauth_success_0:
mov x8, x16
mov x16, x0
//APP
//NO_APP
mov x17, #1234 // =0x4d2
autda x16, x17
mov x17, x16
xpacd x17
cmp x16, x17
b.eq .Lauth_success_1
brk #0xc472
.Lauth_success_1:
mov x17, #42 // =0x2a
pacdb x16, x17
mov x0, x16
ret
```
Meaning the authentication is redone after inline asm snippet.
</details>
https://github.com/llvm/llvm-project/pull/130807
More information about the llvm-commits
mailing list