[llvm] r361908 - [AArch64] auto-generate complete test checks; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Tue May 28 18:37:44 PDT 2019


Author: spatel
Date: Tue May 28 18:37:44 2019
New Revision: 361908

URL: http://llvm.org/viewvc/llvm-project?rev=361908&view=rev
Log:
[AArch64] auto-generate complete test checks; NFC

Modified:
    llvm/trunk/test/CodeGen/AArch64/sdag-store-merging-bug.ll
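
For context, the NOTE line added in the diff below names the script that
produced the new checks. A minimal sketch of regenerating them by hand
(assuming the command is run from the llvm source root with a freshly built
llc on PATH; the script also accepts an explicit --llc-binary path otherwise):

    python utils/update_llc_test_checks.py \
        test/CodeGen/AArch64/sdag-store-merging-bug.ll

The script runs the test's RUN line through llc and rewrites the CHECK lines
in place to match the complete output, which is what yields the full
per-instruction checks seen in the diff.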

Modified: llvm/trunk/test/CodeGen/AArch64/sdag-store-merging-bug.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/sdag-store-merging-bug.ll?rev=361908&r1=361907&r2=361908&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/sdag-store-merging-bug.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/sdag-store-merging-bug.ll Tue May 28 18:37:44 2019
@@ -1,20 +1,22 @@
-; RUN: llc -o - %s -mtriple aarch64-- -mattr +slow-misaligned-128store -stop-after=instruction-select | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -o - %s -mtriple aarch64-- -mattr +slow-misaligned-128store | FileCheck %s
 ; Checks for a bug where selection dag store merging would construct wrong
 ; indices when extracting values from vectors, resulting in an invalid
 ; lane duplication in this case.
 ; The only way I could trigger stores with mismatching types getting merged was
 ; via the aarch64 slow-misaligned-128store code splitting stores earlier.
 
-; CHECK-LABEL: name: func
-; CHECK: LDRQui
-; CHECK-NOT: INSERT_SUBREG
-; CHECK-NOT: DUP
-; CHECK-NEXT: STRQui
+; aarch64 feature slow-misaligned-128store splits the following store.
+; store merging immediately merges it back together (but used to get the
+; merging wrong), this is the only way I was able to reproduce the bug...
+
 define void @func(<2 x double>* %sptr, <2 x double>* %dptr) {
+; CHECK-LABEL: func:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    ldr q0, [x0]
+; CHECK-NEXT:    str q0, [x1]
+; CHECK-NEXT:    ret
   %load = load <2 x double>, <2 x double>* %sptr, align 8
-  ; aarch64 feature slow-misaligned-128store splits the following store.
-  ; store merging immediately merges it back together (but used to get the
-  ; merging wrong), this is the only way I was able to reproduce the bug...
   store <2 x double> %load, <2 x double>* %dptr, align 4
   ret void
 }
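
For reference, dropping -stop-after=instruction-select from the RUN line means
the regenerated checks match the final assembly (ldr/str of q0) rather than the
MIR opcodes (LDRQui/STRQui) that the old hand-written checks targeted. A
minimal sketch of running the updated test by hand (assuming llc and FileCheck
from the build tree are on PATH and the path points at the checked-out file):

    llc -o - test/CodeGen/AArch64/sdag-store-merging-bug.ll \
        -mtriple aarch64-- -mattr +slow-misaligned-128store \
      | FileCheck test/CodeGen/AArch64/sdag-store-merging-bug.ll

FileCheck is silent on success; a regression in store merging would reintroduce
the lane duplication and trip the CHECK-NEXT lines.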