[llvm] 7af4f44 - [aarch64][tests] Add tests which show the current lack of implicit null check support

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 16 12:55:38 PDT 2020


Author: Philip Reames
Date: 2020-09-16T12:55:29-07:00
New Revision: 7af4f44c3e3dfb4483fb4dcc200f9376e96d6208

URL: https://github.com/llvm/llvm-project/commit/7af4f44c3e3dfb4483fb4dcc200f9376e96d6208
DIFF: https://github.com/llvm/llvm-project/commit/7af4f44c3e3dfb4483fb4dcc200f9376e96d6208.diff

LOG: [aarch64][tests] Add tests which show the current lack of implicit null check support

I will be posting a patch which adds the appropriate target support shortly; landing the tests first so that the diffs in that patch are clear.
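
For context, here is a minimal sketch of the pattern these tests exercise (the hypothetical @example below simply condenses the first test in the new file; it is not part of the commit). The ImplicitNullChecks pass keys off an explicit null compare feeding a branch tagged with !make.implicit, with the memory access on the not-null path:

  define i32 @example(i32* %x) {
   entry:
    %c = icmp eq i32* %x, null
    ; The !make.implicit tag marks this check as a candidate for being folded
    ; into the load below.
    br i1 %c, label %is_null, label %not_null, !make.implicit !0

   not_null:
    %t = load i32, i32* %x
    ret i32 %t

   is_null:
    ret i32 42
  }

  !0 = !{}

On a target with support (currently X86), the pass removes the explicit compare-and-branch and lets the load itself act as the null check, recording a FaultMap entry so execution can recover to %is_null if the access faults. Since AArch64 support has not landed yet, the CHECK lines below still show an explicit cbz.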

Added: 
    llvm/test/CodeGen/AArch64/implicit-null-check.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AArch64/implicit-null-check.ll b/llvm/test/CodeGen/AArch64/implicit-null-check.ll
new file mode 100644
index 000000000000..5e7bb6f5bba0
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/implicit-null-check.ll
@@ -0,0 +1,422 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -verify-machineinstrs -O3 -mtriple=aarch64-unknown-unknown -enable-implicit-null-checks | FileCheck %s
+
+; Basic test for implicit null check conversion - this is analogous to the
+; file with the same name in the X86 tree, but adjusted to remove patterns
+; related to memory folding of arithmetic (since AArch64 doesn't fold memory
+; operands into arithmetic) and to add a couple of AArch64-specific tests.
+; NOTE: These are currently negative tests, since they are being precommitted
+; before the change that enables the transform.
+
+define i32 @imp_null_check_load_fallthrough(i32* %x) {
+; CHECK-LABEL: imp_null_check_load_fallthrough:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cbz x0, .LBB0_2
+; CHECK-NEXT:  // %bb.1: // %not_null
+; CHECK-NEXT:    ldr w0, [x0]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB0_2: // %is_null
+; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    ret
+ entry:
+  %c = icmp eq i32* %x, null
+  br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ not_null:
+  %t = load i32, i32* %x
+  ret i32 %t
+
+is_null:
+  ret i32 42
+}
+
+
+define i32 @imp_null_check_load_reorder(i32* %x) {
+; CHECK-LABEL: imp_null_check_load_reorder:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cbz x0, .LBB1_2
+; CHECK-NEXT:  // %bb.1: // %not_null
+; CHECK-NEXT:    ldr w0, [x0]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB1_2: // %is_null
+; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    ret
+ entry:
+  %c = icmp eq i32* %x, null
+  br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null:
+  ret i32 42
+
+ not_null:
+  %t = load i32, i32* %x
+  ret i32 %t
+}
+
+define i32 @imp_null_check_unordered_load(i32* %x) {
+; CHECK-LABEL: imp_null_check_unordered_load:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cbz x0, .LBB2_2
+; CHECK-NEXT:  // %bb.1: // %not_null
+; CHECK-NEXT:    ldr w0, [x0]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB2_2: // %is_null
+; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    ret
+ entry:
+  %c = icmp eq i32* %x, null
+  br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null:
+  ret i32 42
+
+ not_null:
+  %t = load atomic i32, i32* %x unordered, align 4
+  ret i32 %t
+}
+
+
+define i32 @imp_null_check_seq_cst_load(i32* %x) {
+; CHECK-LABEL: imp_null_check_seq_cst_load:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cbz x0, .LBB3_2
+; CHECK-NEXT:  // %bb.1: // %not_null
+; CHECK-NEXT:    ldar w0, [x0]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB3_2: // %is_null
+; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    ret
+ entry:
+  %c = icmp eq i32* %x, null
+  br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null:
+  ret i32 42
+
+ not_null:
+  %t = load atomic i32, i32* %x seq_cst, align 4
+  ret i32 %t
+}
+
+; Might be memory-mapped IO, so we can't rely on fault behavior
+define i32 @imp_null_check_volatile_load(i32* %x) {
+; CHECK-LABEL: imp_null_check_volatile_load:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cbz x0, .LBB4_2
+; CHECK-NEXT:  // %bb.1: // %not_null
+; CHECK-NEXT:    ldr w0, [x0]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB4_2: // %is_null
+; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    ret
+ entry:
+  %c = icmp eq i32* %x, null
+  br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null:
+  ret i32 42
+
+ not_null:
+  %t = load volatile i32, i32* %x, align 4
+  ret i32 %t
+}
+
+
+define i8 @imp_null_check_load_i8(i8* %x) {
+; CHECK-LABEL: imp_null_check_load_i8:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cbz x0, .LBB5_2
+; CHECK-NEXT:  // %bb.1: // %not_null
+; CHECK-NEXT:    ldrb w0, [x0]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB5_2: // %is_null
+; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    ret
+ entry:
+  %c = icmp eq i8* %x, null
+  br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null:
+  ret i8 42
+
+ not_null:
+  %t = load i8, i8* %x
+  ret i8 %t
+}
+
+define i256 @imp_null_check_load_i256(i256* %x) {
+; CHECK-LABEL: imp_null_check_load_i256:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cbz x0, .LBB6_2
+; CHECK-NEXT:  // %bb.1: // %not_null
+; CHECK-NEXT:    ldp x8, x1, [x0]
+; CHECK-NEXT:    ldp x2, x3, [x0, #16]
+; CHECK-NEXT:    mov x0, x8
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB6_2: // %is_null
+; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    mov x1, xzr
+; CHECK-NEXT:    mov x2, xzr
+; CHECK-NEXT:    mov x3, xzr
+; CHECK-NEXT:    ret
+ entry:
+  %c = icmp eq i256* %x, null
+  br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null:
+  ret i256 42
+
+ not_null:
+  %t = load i256, i256* %x
+  ret i256 %t
+}
+
+
+
+define i32 @imp_null_check_gep_load(i32* %x) {
+; CHECK-LABEL: imp_null_check_gep_load:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cbz x0, .LBB7_2
+; CHECK-NEXT:  // %bb.1: // %not_null
+; CHECK-NEXT:    ldr w0, [x0, #128]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB7_2: // %is_null
+; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    ret
+ entry:
+  %c = icmp eq i32* %x, null
+  br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null:
+  ret i32 42
+
+ not_null:
+  %x.gep = getelementptr i32, i32* %x, i32 32
+  %t = load i32, i32* %x.gep
+  ret i32 %t
+}
+
+define i32 @imp_null_check_add_result(i32* %x, i32 %p) {
+; CHECK-LABEL: imp_null_check_add_result:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cbz x0, .LBB8_2
+; CHECK-NEXT:  // %bb.1: // %not_null
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    add w0, w8, w1
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB8_2: // %is_null
+; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    ret
+ entry:
+  %c = icmp eq i32* %x, null
+  br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null:
+  ret i32 42
+
+ not_null:
+  %t = load i32, i32* %x
+  %p1 = add i32 %t, %p
+  ret i32 %p1
+}
+
+; Can hoist over a potentially faulting instruction as long as we don't
+; change the conditions under which the instruction faults.
+define i32 @imp_null_check_hoist_over_udiv(i32* %x, i32 %a, i32 %b) {
+; CHECK-LABEL: imp_null_check_hoist_over_udiv:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cbz x0, .LBB9_2
+; CHECK-NEXT:  // %bb.1: // %not_null
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    udiv w9, w1, w2
+; CHECK-NEXT:    add w0, w8, w9
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB9_2: // %is_null
+; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    ret
+ entry:
+  %c = icmp eq i32* %x, null
+  br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null:
+  ret i32 42
+
+ not_null:
+  %p1 = udiv i32 %a, %b
+  %t = load i32, i32* %x
+  %res = add i32 %t, %p1
+  ret i32 %res
+}
+
+
+define i32 @imp_null_check_hoist_over_unrelated_load(i32* %x, i32* %y, i32* %z) {
+; CHECK-LABEL: imp_null_check_hoist_over_unrelated_load:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cbz x0, .LBB10_2
+; CHECK-NEXT:  // %bb.1: // %not_null
+; CHECK-NEXT:    ldr w8, [x1]
+; CHECK-NEXT:    ldr w0, [x0]
+; CHECK-NEXT:    str w8, [x2]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB10_2: // %is_null
+; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    ret
+ entry:
+  %c = icmp eq i32* %x, null
+  br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null:
+  ret i32 42
+
+ not_null:
+  %t0 = load i32, i32* %y
+  %t1 = load i32, i32* %x
+  store i32 %t0, i32* %z
+  ret i32 %t1
+}
+
+define i32 @imp_null_check_gep_load_with_use_dep(i32* %x, i32 %a) {
+; CHECK-LABEL: imp_null_check_gep_load_with_use_dep:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cbz x0, .LBB11_2
+; CHECK-NEXT:  // %bb.1: // %not_null
+; CHECK-NEXT:    ldr w8, [x0]
+; CHECK-NEXT:    add w9, w0, w1
+; CHECK-NEXT:    add w8, w9, w8
+; CHECK-NEXT:    add w0, w8, #4 // =4
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB11_2: // %is_null
+; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    ret
+ entry:
+  %c = icmp eq i32* %x, null
+  br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null:
+  ret i32 42
+
+ not_null:
+  %x.loc = getelementptr i32, i32* %x, i32 1
+  %y = ptrtoint i32* %x.loc to i32
+  %b = add i32 %a, %y
+  %t = load i32, i32* %x
+  %z = add i32 %t, %b
+  ret i32 %z
+}
+
+define i32 @imp_null_check_load_fence1(i32* %x) {
+; CHECK-LABEL: imp_null_check_load_fence1:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cbz x0, .LBB12_2
+; CHECK-NEXT:  // %bb.1: // %not_null
+; CHECK-NEXT:    dmb ishld
+; CHECK-NEXT:    ldr w0, [x0]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB12_2: // %is_null
+; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    ret
+entry:
+  %c = icmp eq i32* %x, null
+  br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+is_null:
+  ret i32 42
+
+not_null:
+  fence acquire
+  %t = load i32, i32* %x
+  ret i32 %t
+}
+
+define i32 @imp_null_check_load_fence2(i32* %x) {
+; CHECK-LABEL: imp_null_check_load_fence2:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cbz x0, .LBB13_2
+; CHECK-NEXT:  // %bb.1: // %not_null
+; CHECK-NEXT:    dmb ish
+; CHECK-NEXT:    ldr w0, [x0]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB13_2: // %is_null
+; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    ret
+entry:
+  %c = icmp eq i32* %x, null
+  br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+is_null:
+  ret i32 42
+
+not_null:
+  fence seq_cst
+  %t = load i32, i32* %x
+  ret i32 %t
+}
+
+define void @imp_null_check_store(i32* %x) {
+; CHECK-LABEL: imp_null_check_store:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cbz x0, .LBB14_2
+; CHECK-NEXT:  // %bb.1: // %not_null
+; CHECK-NEXT:    mov w8, #1
+; CHECK-NEXT:    str w8, [x0]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB14_2: // %is_null
+; CHECK-NEXT:    ret
+ entry:
+  %c = icmp eq i32* %x, null
+  br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null:
+  ret void
+
+ not_null:
+  store i32 1, i32* %x
+  ret void
+}
+
+define void @imp_null_check_unordered_store(i32* %x) {
+; CHECK-LABEL: imp_null_check_unordered_store:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cbz x0, .LBB15_2
+; CHECK-NEXT:  // %bb.1: // %not_null
+; CHECK-NEXT:    mov w8, #1
+; CHECK-NEXT:    str w8, [x0]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB15_2: // %is_null
+; CHECK-NEXT:    ret
+ entry:
+  %c = icmp eq i32* %x, null
+  br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null:
+  ret void
+
+ not_null:
+  store atomic i32 1, i32* %x unordered, align 4
+  ret void
+}
+
+define i32 @imp_null_check_neg_gep_load(i32* %x) {
+; CHECK-LABEL: imp_null_check_neg_gep_load:
+; CHECK:       // %bb.0: // %entry
+; CHECK-NEXT:    cbz x0, .LBB16_2
+; CHECK-NEXT:  // %bb.1: // %not_null
+; CHECK-NEXT:    ldur w0, [x0, #-128]
+; CHECK-NEXT:    ret
+; CHECK-NEXT:  .LBB16_2: // %is_null
+; CHECK-NEXT:    mov w0, #42
+; CHECK-NEXT:    ret
+ entry:
+  %c = icmp eq i32* %x, null
+  br i1 %c, label %is_null, label %not_null, !make.implicit !0
+
+ is_null:
+  ret i32 42
+
+ not_null:
+  %x.gep = getelementptr i32, i32* %x, i32 -32
+  %t = load i32, i32* %x.gep
+  ret i32 %t
+}
+
+!0 = !{}