[PATCH] D156799: Update generic scheduling to use A510 scheduling model

Dave Green via Phabricator via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 3 04:00:38 PDT 2023


dmgreen added a comment.

Thanks. If you can clean up these tests, then this looks good to me.



================
Comment at: llvm/test/CodeGen/AArch64/aarch64-dynamic-stack-layout.ll:86
+;
+; CHECK-LABEL: novla_nodynamicrealign_call:
+; CHECK:       // %bb.0: // %entry
----------------
This looks like it would be better with the manual check lines below.


================
Comment at: llvm/test/CodeGen/AArch64/arm64-ld1.ll:1
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
 ; RUN: llc < %s -mtriple=arm64-eabi -aarch64-neon-syntax=apple -verify-machineinstrs -asm-verbose=false | FileCheck %s
----------------
This doesn't look like it has generated properly.


================
Comment at: llvm/test/CodeGen/AArch64/arm64-promote-const-complex-initializers.ll:12
 define [1 x <4 x float>] @test1() {
-; CHECK-LABEL:    .p2align    4, 0x0              ; -- Begin function test1
-; CHECK-NEXT: lCPI0_0:
-; CHECK-NEXT:     .quad   0                       ; 0x0
-; CHECK-NEXT:     .quad   4575657221408423936     ; 0x3f80000000000000
-; CHECK-NEXT:     .section    __TEXT,__text,regular,pure_instructions
-; CHECK-NEXT:     .globl  _test1
-; CHECK-NEXT:     .p2align    2
-; CHECK-NEXT: _test1:                                 ; @test1
-; CHECK-NEXT:     .cfi_startproc
-; CHECK-NEXT: ; %bb.0:
-; CHECK-NEXT: Lloh0:
-; CHECK-NEXT:     adrp    x8, lCPI0_0 at PAGE
-; CHECK-NEXT: Lloh1:
-; CHECK-NEXT:     ldr q0, [x8, lCPI0_0 at PAGEOFF]
-; CHECK-NEXT:     ret
+; CHECK-LABEL: test1:
+; CHECK:       ; %bb.0:
----------------
This doesn't look like it is checking everything it did in the past.


================
Comment at: llvm/test/CodeGen/AArch64/arm64_32.ll:17
 define ptr @test_global_addr() {
-; CHECK-LABEL: test_global_addr:
-; CHECK: adrp [[PAGE:x[0-9]+]], _var32 at PAGE
-; CHECK-OPT: add x0, [[PAGE]], _var32 at PAGEOFF
-; CHECK-FAST: add [[TMP:x[0-9]+]], [[PAGE]], _var32 at PAGEOFF
-; CHECK-FAST: and x0, [[TMP]], #0xffffffff
+; CHECK-OPT-LABEL: test_global_addr:
+; CHECK-OPT:       ; %bb.0:
----------------
This file might be better manually generated. Some of the check lines and comments are deliberately placed.


================
Comment at: llvm/test/CodeGen/AArch64/atomic-ops-lse.ll:36
+;
+; CHECK-REG-LABEL: test_atomic_load_add_i8:
+; CHECK-REG:       // %bb.0:
----------------
It looks like the RUN line for CHECK-REG is trying to check that no invalid registers are formed. You might be able to change it to `--check-prefixes=CHECK,CHECK-REG`


================
Comment at: llvm/test/CodeGen/AArch64/bfis-in-loop.ll:13
 
 define i64 @bfis_in_loop_zero() {
 entry:
----------------
This doesn't look right


================
Comment at: llvm/test/CodeGen/AArch64/f16-instructions.ll:13
 ; RUN: --check-prefixes=FALLBACK-FP16,GISEL-FP16,GISEL
-
 target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 define half @test_fadd(half %a, half %b) #0 {
----------------
This one needs to be done manually.


================
Comment at: llvm/test/CodeGen/AArch64/fmlal-loreg.ll:3
 ; RUN: llc -mtriple=aarch64 -mattr=+fp16fml -verify-machineinstrs %s -o - 2>&1 | FileCheck %s
-
 ; This tests that the fmlal/fmlal2 instructions only accept lo registers for
 ; the index operand, using inline asm to force the available registers.
----------------
I'm not sure why these blank lines were removed. I think they can be kept in.


================
Comment at: llvm/test/CodeGen/AArch64/misched-detail-resource-booking-01.mir:29
 ---
 name:            umull_and_v8i32
 alignment:       4
----------------
This file can specify -mcpu=cortex-a55 to keep the test as it was before.


================
Comment at: llvm/test/CodeGen/AArch64/nontemporal-load.ll:5
 define <4 x double> @test_ldnp_v4f64(ptr %A) {
+;
 ; CHECK-LABEL: test_ldnp_v4f64:
----------------
These newlines and blank ; lines look funny.


================
Comment at: llvm/test/CodeGen/AArch64/shrink-wrapping-vla.ll:18
 define dso_local void @f(i32 %n, ptr nocapture %x) uwtable {
+; CHECK-LABEL: f:
+; CHECK:       // %bb.0: // %entry
----------------
Manual update might be better.


================
Comment at: llvm/test/CodeGen/AArch64/sme2-intrinsics-min.ll:140
 define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }
 @multi_vec_min_single_x4_s8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2, <vscale x 16 x i8> %zdn3, <vscale x 16 x i8> %zdn4, <vscale x 16 x i8> %zm) {
   %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }
----------------
This doesn't look right.


================
Comment at: llvm/test/CodeGen/AArch64/sme2-intrinsics-rshl.ll:53
 ; SRSHL (Single, x4)
-
 define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }
 @multi_vec_rounding_shl_single_x4_s8(<vscale x 16 x i8> %dummy, <vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2, <vscale x 16 x i8> %zdn3, <vscale x 16 x i8> %zdn4, <vscale x 16 x i8> %zm) {
----------------
This doesn't look right.


================
Comment at: llvm/test/CodeGen/AArch64/sme2-intrinsics-sqdmulh.ll:54
 define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }
 @multi_vec_sat_double_mulh_single_x4_s8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zdn1, <vscale x 16 x i8> %zdn2, <vscale x 16 x i8> %zdn3, <vscale x 16 x i8> %zdn4, <vscale x 16 x i8> %zm) {
   %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> }
----------------
This doesn't look right.


================
Comment at: llvm/test/CodeGen/AArch64/speculation-hardening-loads.ll:5
 define i128 @ldp_single_csdb(ptr %p) speculative_load_hardening {
+; CHECK-LABEL: ldp_single_csdb:
+; CHECK:       // %bb.0: // %entry
----------------
Manual update might be better.


================
Comment at: llvm/test/CodeGen/AArch64/split-vector-insert.ll:13
 define <vscale x 2 x i64> @test_nxv2i64_v8i64(<vscale x 2 x i64> %a, <8 x i64> %b) #0 {
-; CHECK-LEGALIZATION: Legally typed node: [[T1:t[0-9]+]]: nxv2i64 = insert_subvector {{t[0-9]+}}, {{t[0-9]+}}, Constant:i64<0>
-; CHECK-LEGALIZATION: Legally typed node: [[T2:t[0-9]+]]: nxv2i64 = insert_subvector [[T1]], {{t[0-9]+}}, Constant:i64<2>
-; CHECK-LEGALIZATION: Legally typed node: [[T3:t[0-9]+]]: nxv2i64 = insert_subvector [[T2]], {{t[0-9]+}}, Constant:i64<4>
-; CHECK-LEGALIZATION: Legally typed node: [[T4:t[0-9]+]]: nxv2i64 = insert_subvector [[T3]], {{t[0-9]+}}, Constant:i64<6>
-
+; CHECK-LEGALIZATION-LABEL: test_nxv2i64_v8i64:
+; CHECK-LEGALIZATION:       // %bb.0:
----------------
This was testing the debug output before.


================
Comment at: llvm/test/CodeGen/AArch64/stack-guard-sysreg.ll:1
-; RUN: split-file %s %t
 ; RUN: cat %t/main.ll %t/a.ll > %t/a2.ll
 ; RUN: cat %t/main.ll %t/b.ll > %t/b2.ll
----------------
Can you explain this one? Are the two versions now too different to keep the same?


================
Comment at: llvm/test/CodeGen/AArch64/sve-fixed-length-build-vector.ll:56
 define void @build_vector_no_stride_v4i64(ptr %a) #0 {
-; VBITS_GE_256-LABEL:  .LCPI4_0:
-; VBITS_GE_256:         .xword  0
----------------
Keep this


================
Comment at: llvm/test/CodeGen/AArch64/sve-split-int-pred-reduce.ll:25
+; CHECK-NEXT:    ptrue p1.b
+; CHECK-NEXT:    nots p0.b, p1/z, p0.b
 ; CHECK-NEXT:    cset w0, eq
----------------
This is nice :)


================
Comment at: llvm/test/CodeGen/AArch64/swift-return.ll:7
 define i16 @test1(i32) {
+;
+; CHECK-LABEL: test1:
----------------
Manually updating this one may be best.


================
Comment at: llvm/test/CodeGen/AArch64/vec-combine-compare-to-bitmask.ll:10
 ; Bits used in mask
-; CHECK-LABEL: lCPI0_0
-; CHECK-NEXT: .byte	1
-; CHECK-NEXT: .byte	2
-; CHECK-NEXT: .byte	4
-; CHECK-NEXT: .byte	8
-; CHECK-NEXT: .byte	16
-; CHECK-NEXT: .byte	32
-; CHECK-NEXT: .byte	64
-; CHECK-NEXT: .byte	128
-; CHECK-NEXT: .byte	1
-; CHECK-NEXT: .byte	2
-; CHECK-NEXT: .byte	4
-; CHECK-NEXT: .byte	8
-; CHECK-NEXT: .byte	16
-; CHECK-NEXT: .byte	32
-; CHECK-NEXT: .byte	64
-; CHECK-NEXT: .byte	128
+; CHECK-LABEL: convert_to_bitmask16:
+; CHECK:       ; %bb.0:
----------------
These need to keep checking the constant pools


================
Comment at: llvm/test/CodeGen/AArch64/vec-combine-compare-truncate-store.ll:5
 define void @store_16_elements(<16 x i8> %vec, ptr %out) {
 ; Bits used in mask
+; CHECK-LABEL: store_16_elements:
----------------
These too.


================
Comment at: llvm/test/CodeGen/AArch64/zext-to-tbl.ll:5
 
-; CHECK-LABEL: lCPI0_0:
-; CHECK-NEXT:    .byte   0                               ; 0x0
----------------
These too.


Repository:
  rG LLVM Github Monorepo

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D156799/new/

https://reviews.llvm.org/D156799



More information about the llvm-commits mailing list