[llvm] 10b93a5 - [AArch64] Make speculation-hardening-sls.ll x16 test more robust

David Green via llvm-commits llvm-commits at lists.llvm.org
Tue Oct 5 02:32:35 PDT 2021


Author: David Green
Date: 2021-10-05T10:32:30+01:00
New Revision: 10b93a5deceaa7d7daae50f12bc59f49b47680fd

URL: https://github.com/llvm/llvm-project/commit/10b93a5deceaa7d7daae50f12bc59f49b47680fd
DIFF: https://github.com/llvm/llvm-project/commit/10b93a5deceaa7d7daae50f12bc59f49b47680fd.diff

LOG: [AArch64] Make speculation-hardening-sls.ll x16 test more robust

As suggested in D110830, this copies the Arm backend's method of testing
function calls through specific registers: inline assembly is used to force
the loaded function pointer into x16, so the test can check that the
__llvm_slsblr_thunk calls do not go through a register that the linker is
allowed to clobber.

Differential Revision: https://reviews.llvm.org/D111056
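
A minimal sketch of the register-pinning pattern the log describes (not part
of the commit; the @example function name is made up for illustration). The
"={x16},{x16}" constraint ties both the asm input and its result to x16, so
the indirect call below must be emitted as a BLR through that register:

  define i64 @example(i64 ()** %fpp) {
  entry:
    ; Load the function pointer, then pass it through a no-op asm
    ; (add $0, $1, #0) whose constraints pin the value to x16.
    %fp = load i64 ()*, i64 ()** %fpp, align 8
    %fp_x16 = call i64 ()* asm "add $0, $1, #0", "={x16},{x16}"(i64 ()* %fp)
    %ret = call i64 %fp_x16()
    ret i64 %ret
  }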

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/speculation-hardening-sls.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/speculation-hardening-sls.ll b/llvm/test/CodeGen/AArch64/speculation-hardening-sls.ll
index 89f2fba022b7..83793f2caa15 100644
--- a/llvm/test/CodeGen/AArch64/speculation-hardening-sls.ll
+++ b/llvm/test/CodeGen/AArch64/speculation-hardening-sls.ll
@@ -146,26 +146,19 @@ entry:
 
 ; Verify that neither x16 nor x17 are used when the BLR mitigation is enabled,
 ; as a linker is allowed to clobber x16 or x17 on calls, which would break the
-; correct execution of the code sequence produced by the mitigation.
-; The below test carefully increases register pressure to persuade code
-; generation to produce a BLR x16. Yes, that is a bit fragile.
-define i64 @check_x16(i64 (i8*, i64, i64, i64, i64, i64, i64, i64)** nocapture readonly %fp, i64 (i8*, i64, i64, i64, i64, i64, i64, i64)** nocapture readonly %fp2) "target-features"="+neon,+reserve-x10,+reserve-x11,+reserve-x12,+reserve-x13,+reserve-x14,+reserve-x15,+reserve-x18,+reserve-x20,+reserve-x21,+reserve-x22,+reserve-x23,+reserve-x24,+reserve-x25,+reserve-x26,+reserve-x27,+reserve-x28,+reserve-x30,+reserve-x9" {
+; correct execution of the code sequence produced by the mitigation. The below
+; test attempts to force *%f into x16 using inline assembly.
+define i64 @check_x16(i64 ()** nocapture readonly %fp, i64 ()** nocapture readonly %fp2) "target-features"="+neon,+reserve-x10,+reserve-x11,+reserve-x12,+reserve-x13,+reserve-x14,+reserve-x15,+reserve-x18,+reserve-x20,+reserve-x21,+reserve-x22,+reserve-x23,+reserve-x24,+reserve-x25,+reserve-x26,+reserve-x27,+reserve-x28,+reserve-x30,+reserve-x9" {
 entry:
 ; CHECK-LABEL: check_x16:
-  %0 = load i64 (i8*, i64, i64, i64, i64, i64, i64, i64)*, i64 (i8*, i64, i64, i64, i64, i64, i64, i64)** %fp, align 8
-  %1 = bitcast i64 (i8*, i64, i64, i64, i64, i64, i64, i64)** %fp2 to i8**
-  %2 = load i8*, i8** %1, align 8
-  %call = call i64 %0(i8* %2, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
-  %3 = load i64 (i8*, i64, i64, i64, i64, i64, i64, i64)*, i64 (i8*, i64, i64, i64, i64, i64, i64, i64)** %fp2, align 8
-  %4 = bitcast i64 (i8*, i64, i64, i64, i64, i64, i64, i64)** %fp to i8**
-  %5 = load i8*, i8** %4, align 8;, !tbaa !2
-  %call1 = call i64 %3(i8* %5, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0, i64 0)
+  %f = load i64 ()*, i64 ()** %fp, align 8
+  %x16_f = tail call i64 ()* asm "add $0, $1, #0", "={x16},{x16}"(i64 ()* %f) nounwind
+  %call1 = call i64 %x16_f()
 ; NOHARDEN:   blr x16
 ; ISBDSB-NOT: bl __llvm_slsblr_thunk_x16
 ; SB-NOT:     bl __llvm_slsblr_thunk_x16
 ; CHECK
-  %add = add nsw i64 %call1, %call
-  ret i64 %add
+  ret i64 %call1
 ; CHECK: .Lfunc_end
 }
 




More information about the llvm-commits mailing list