[llvm] 5e612bc - [AArch64] Make stack tagging compatible with SLH

Kristof Beyls via llvm-commits llvm-commits at lists.llvm.org
Thu Apr 20 07:54:23 PDT 2023


Author: Kristof Beyls
Date: 2023-04-20T16:53:51+02:00
New Revision: 5e612bc291347d364f1d47c37f0d34eb6474b9b5

URL: https://github.com/llvm/llvm-project/commit/5e612bc291347d364f1d47c37f0d34eb6474b9b5
DIFF: https://github.com/llvm/llvm-project/commit/5e612bc291347d364f1d47c37f0d34eb6474b9b5.diff

LOG: [AArch64] Make stack tagging compatible with SLH

See https://github.com/llvm/llvm-project/issues/61830

Speculative Load Hardening (SLH) requires that conditional branches be
implemented as B.cc branches, not as CB{N}Z/TB{N}Z branches.
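
For context: roughly speaking, the AArch64 SLH pass tracks mis-speculation
in a taint register and updates it along both edges of a conditional branch
with a CSEL keyed on the branch's condition code, so it can only harden
branches that expose an NZCV condition. Below is a minimal sketch of that
distinction (a hypothetical helper for illustration only, not the actual
code in AArch64SpeculationHardening.cpp; it assumes it lives inside
llvm/lib/Target/AArch64 so the generated opcode enum is visible):

  #include "AArch64InstrInfo.h"
  #include "MCTargetDesc/AArch64MCTargetDesc.h"
  #include "llvm/CodeGen/MachineInstr.h"

  using namespace llvm;

  // Returns true if the terminator carries a reusable AArch64CC condition.
  static bool branchExposesConditionCode(const MachineInstr &MI) {
    switch (MI.getOpcode()) {
    case AArch64::Bcc:
      // B.cc: the condition code is an operand and NZCV is read, so the
      // hardening CSEL can be keyed on the same condition.
      return true;
    case AArch64::CBZW:
    case AArch64::CBZX:
    case AArch64::CBNZW:
    case AArch64::CBNZX:
    case AArch64::TBZW:
    case AArch64::TBZX:
    case AArch64::TBNZW:
    case AArch64::TBNZX:
      // Compare-and-branch / test-bit-and-branch: there is no NZCV
      // condition for the taint update to reuse.
      return false;
    default:
      return false;
    }
  }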

Stack tagging was expanding one of its pseudo instructions (the set-tag
loop) to a cbnz branch. This commit changes that expansion to a
flag-setting subs followed by a b.ne branch instead.
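
The relevant expansion lives in AArch64ExpandPseudo::expandSetTagLoop. A
simplified sketch of the new back-edge it now emits (the full change is in
the diff below; LoopBB, DL, TII and SizeReg are the locals of that
function):

  // SUBS (rather than SUB) decrements the remaining size and sets NZCV.
  BuildMI(LoopBB, DL, TII->get(AArch64::SUBSXri))
      .addDef(SizeReg)
      .addReg(SizeReg)
      .addImm(16 * 2)
      .addImm(0);
  // With NZCV available, the loop back-edge can be a B.NE that reads the
  // flags (an implicit, killed use) instead of a CBNZ that reads SizeReg;
  // B.NE is the branch form SLH knows how to harden.
  BuildMI(LoopBB, DL, TII->get(AArch64::Bcc))
      .addImm(AArch64CC::NE)
      .addMBB(LoopBB)
      .addReg(AArch64::NZCV, RegState::Implicit | RegState::Kill);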

Note that the regression test was added to settag.ll rather than to
speculation-hardening.ll. The speculation-hardening.ll tests also check
that everything in the file works with global-isel, and stack tagging
does not yet work with global-isel. Therefore, the test for the
combination of stack tagging and SLH had to be added to a test file
that does not exercise global-isel, i.e. settag.ll.

Differential Revision: https://reviews.llvm.org/D148508

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
    llvm/test/CodeGen/AArch64/settag-merge-order.ll
    llvm/test/CodeGen/AArch64/settag-merge.ll
    llvm/test/CodeGen/AArch64/settag.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index a413c924c862a..eaf5775eb971a 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -706,12 +706,15 @@ bool AArch64ExpandPseudo::expandSetTagLoop(
       .addImm(2)
       .cloneMemRefs(MI)
       .setMIFlags(MI.getFlags());
-  BuildMI(LoopBB, DL, TII->get(AArch64::SUBXri))
+  BuildMI(LoopBB, DL, TII->get(AArch64::SUBSXri))
       .addDef(SizeReg)
       .addReg(SizeReg)
       .addImm(16 * 2)
       .addImm(0);
-  BuildMI(LoopBB, DL, TII->get(AArch64::CBNZX)).addUse(SizeReg).addMBB(LoopBB);
+  BuildMI(LoopBB, DL, TII->get(AArch64::Bcc))
+      .addImm(AArch64CC::NE)
+      .addMBB(LoopBB)
+      .addReg(AArch64::NZCV, RegState::Implicit | RegState::Kill);
 
   LoopBB->addSuccessor(LoopBB);
   LoopBB->addSuccessor(DoneBB);

diff --git a/llvm/test/CodeGen/AArch64/settag-merge-order.ll b/llvm/test/CodeGen/AArch64/settag-merge-order.ll
index e974a490a1717..ec13a7c99ad42 100644
--- a/llvm/test/CodeGen/AArch64/settag-merge-order.ll
+++ b/llvm/test/CodeGen/AArch64/settag-merge-order.ll
@@ -11,8 +11,8 @@ entry:
 ; CHECK-LABEL: stg128_128_gap_128_128:
 ; CHECK: mov     x8, #512
 ; CHECK: st2g    sp, [sp], #32
-; CHECK: sub     x8, x8, #32
-; CHECK: cbnz    x8,
+; CHECK: subs    x8, x8, #32
+; CHECK: b.ne
 ; CHECK: ret
   %a = alloca i8, i32 128, align 16
   %a2 = alloca i8, i32 128, align 16
@@ -40,18 +40,18 @@ entry:
 
 if.then:
 ; CHECK: mov     x8, #320
-; CHECK: sub     x8, x8, #32
+; CHECK: subs    x8, x8, #32
 ; CHECK: st2g    x9, [x9], #32
-; CHECK: cbnz    x8,
+; CHECK: b.ne
   call void @llvm.aarch64.settag(ptr %a, i64 160)
   call void @llvm.aarch64.settag(ptr %a2, i64 160)
   br label %if.end
 
 if.else:
 ; CHECK: mov     x8, #256
-; CHECK: sub     x8, x8, #32
+; CHECK: subs    x8, x8, #32
 ; CHECK: st2g    x9, [x9], #32
-; CHECK: cbnz    x8,
+; CHECK: b.ne
   call void @llvm.aarch64.settag(ptr %c, i64 128)
   call void @llvm.aarch64.settag(ptr %c2, i64 128)
   br label %if.end
@@ -59,8 +59,8 @@ if.else:
 if.end:
 ; CHECK: mov     x8, #576
 ; CHECK: st2g    sp, [sp], #32
-; CHECK: sub     x8, x8, #32
-; CHECK: cbnz    x8,
+; CHECK: subs    x8, x8, #32
+; CHECK: b.ne
   call void @llvm.aarch64.settag(ptr %a, i64 160)
   call void @llvm.aarch64.settag(ptr %a2, i64 160)
   call void @llvm.aarch64.settag(ptr %c, i64 128)

diff --git a/llvm/test/CodeGen/AArch64/settag-merge.ll b/llvm/test/CodeGen/AArch64/settag-merge.ll
index 591c76f5728b9..50cc1fd43227d 100644
--- a/llvm/test/CodeGen/AArch64/settag-merge.ll
+++ b/llvm/test/CodeGen/AArch64/settag-merge.ll
@@ -56,8 +56,8 @@ entry:
 ; CHECK-LABEL: stg128_128_128_128:
 ; CHECK: mov     x8, #512
 ; CHECK: st2g    sp, [sp], #32
-; CHECK: sub     x8, x8, #32
-; CHECK: cbnz    x8,
+; CHECK: subs    x8, x8, #32
+; CHECK: b.ne
 ; CHECK: ret
   %a = alloca i8, i32 128, align 16
   %b = alloca i8, i32 128, align 16
@@ -75,8 +75,8 @@ entry:
 ; CHECK-LABEL: stg16_512_16:
 ; CHECK: mov     x8, #544
 ; CHECK: st2g    sp, [sp], #32
-; CHECK: sub     x8, x8, #32
-; CHECK: cbnz    x8,
+; CHECK: subs    x8, x8, #32
+; CHECK: b.ne
 ; CHECK: ret
   %a = alloca i8, i32 16, align 16
   %b = alloca i8, i32 512, align 16
@@ -92,8 +92,8 @@ entry:
 ; CHECK-LABEL: stg512_512_512:
 ; CHECK: mov     x8, #1536
 ; CHECK: st2g    sp, [sp], #32
-; CHECK: sub     x8, x8, #32
-; CHECK: cbnz    x8,
+; CHECK: subs    x8, x8, #32
+; CHECK: b.ne
 ; CHECK: ret
   %a = alloca i8, i32 512, align 16
   %b = alloca i8, i32 512, align 16
@@ -136,9 +136,9 @@ entry:
 ; CHECK: tbz   w0, #0, [[LABEL:.LBB.*]]
 ; CHECK: add   x9, sp, #
 ; CHECK: mov   x8, #256
-; CHECK: sub   x8, x8, #32
+; CHECK: subs  x8, x8, #32
 ; CHECK: st2g  x9, [x9], #32
-; CHECK: cbnz  x8,
+; CHECK: b.ne
 ; CHECK: [[LABEL]]:
 ; CHECK: stg     sp, [sp, #
 ; CHECK: st2g    sp, [sp], #
@@ -164,9 +164,9 @@ entry:
 ; CHECK: tbz   w0, #0, [[LABEL:.LBB.*]]
 ; CHECK: add   x9, sp, #
 ; CHECK: mov   x8, #1024
-; CHECK: sub   x8, x8, #32
+; CHECK: subs  x8, x8, #32
 ; CHECK: st2g  x9, [x9], #32
-; CHECK: cbnz  x8,
+; CHECK: b.ne
 ; CHECK: [[LABEL]]:
 ; CHECK: stg     sp, [sp, #
 ; CHECK: st2g    sp, [sp], #
@@ -192,13 +192,13 @@ entry:
 ; CHECK-LABEL: stg128_128_gap_128_128:
 ; CHECK: mov     x9, sp
 ; CHECK: mov     x8, #256
-; CHECK: sub     x8, x8, #32
+; CHECK: subs    x8, x8, #32
 ; CHECK: st2g    x9, [x9], #32
-; CHECK: cbnz    x8,
+; CHECK: b.ne
 ; CHECK: mov     x8, #256
 ; CHECK: st2g    sp, [sp], #32
-; CHECK: sub     x8, x8, #32
-; CHECK: cbnz    x8,
+; CHECK: subs    x8, x8, #32
+; CHECK: b.ne
 ; CHECK: ret
   %a = alloca i8, i32 128, align 16
   %a2 = alloca i8, i32 128, align 16

diff --git a/llvm/test/CodeGen/AArch64/settag.ll b/llvm/test/CodeGen/AArch64/settag.ll
index 60712eac693ea..747a21a60241f 100644
--- a/llvm/test/CodeGen/AArch64/settag.ll
+++ b/llvm/test/CodeGen/AArch64/settag.ll
@@ -61,9 +61,9 @@ define void @stg16(ptr %p) {
 ; CHECK-NEXT:    mov x8, #256
 ; CHECK-NEXT:  .LBB5_1: // %entry
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    sub x8, x8, #32
+; CHECK-NEXT:    subs x8, x8, #32
 ; CHECK-NEXT:    st2g x0, [x0], #32
-; CHECK-NEXT:    cbnz x8, .LBB5_1
+; CHECK-NEXT:    b.ne .LBB5_1
 ; CHECK-NEXT:  // %bb.2: // %entry
 ; CHECK-NEXT:    ret
 entry:
@@ -78,9 +78,9 @@ define void @stg17(ptr %p) {
 ; CHECK-NEXT:    stg x0, [x0], #16
 ; CHECK-NEXT:  .LBB6_1: // %entry
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    sub x8, x8, #32
+; CHECK-NEXT:    subs x8, x8, #32
 ; CHECK-NEXT:    st2g x0, [x0], #32
-; CHECK-NEXT:    cbnz x8, .LBB6_1
+; CHECK-NEXT:    b.ne .LBB6_1
 ; CHECK-NEXT:  // %bb.2: // %entry
 ; CHECK-NEXT:    ret
 entry:
@@ -106,9 +106,9 @@ define void @stzg17(ptr %p) {
 ; CHECK-NEXT:    stzg x0, [x0], #16
 ; CHECK-NEXT:  .LBB8_1: // %entry
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    sub x8, x8, #32
+; CHECK-NEXT:    subs x8, x8, #32
 ; CHECK-NEXT:    stz2g x0, [x0], #32
-; CHECK-NEXT:    cbnz x8, .LBB8_1
+; CHECK-NEXT:    b.ne .LBB8_1
 ; CHECK-NEXT:  // %bb.2: // %entry
 ; CHECK-NEXT:    ret
 entry:
@@ -155,8 +155,8 @@ define void @stg_alloca17() nounwind {
 ; CHECK-NEXT:  .LBB11_1: // %entry
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    st2g sp, [sp], #32
-; CHECK-NEXT:    sub x8, x8, #32
-; CHECK-NEXT:    cbnz x8, .LBB11_1
+; CHECK-NEXT:    subs x8, x8, #32
+; CHECK-NEXT:    b.ne .LBB11_1
 ; CHECK-NEXT:  // %bb.2: // %entry
 ; CHECK-NEXT:    stg sp, [sp], #16
 ; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
@@ -179,9 +179,9 @@ define void @stg_alloca18() uwtable {
 ; CHECK-NEXT:    stg x9, [x9], #16
 ; CHECK-NEXT:  .LBB12_1: // %entry
 ; CHECK-NEXT:    // =>This Inner Loop Header: Depth=1
-; CHECK-NEXT:    sub x8, x8, #32
+; CHECK-NEXT:    subs x8, x8, #32
 ; CHECK-NEXT:    st2g x9, [x9], #32
-; CHECK-NEXT:    cbnz x8, .LBB12_1
+; CHECK-NEXT:    b.ne .LBB12_1
 ; CHECK-NEXT:  // %bb.2: // %entry
 ; CHECK-NEXT:    add sp, sp, #272
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
@@ -195,5 +195,20 @@ entry:
   ret void
 }
 
+; Verify that SLH works together with MTE stack tagging,
+; see issue https://github.com/llvm/llvm-project/issues/61830
+define void @test_slh() speculative_load_hardening {
+; CHECK-LABEL: test_slh
+; Verify that the memtag loop uses a b.cc conditional branch
+; rather than a cb[n]z branch.
+;CHECK-NOT:   cb{{n?}}z
+;CHECK:       b.
+  %d = alloca [48 x i32], align 4
+  call void @b(ptr %d)
+  ret void
+}
+declare void @b(ptr)
+
+
 declare void @llvm.aarch64.settag(ptr %p, i64 %a)
 declare void @llvm.aarch64.settag.zero(ptr %p, i64 %a)


        

