[llvm] 036e7ee - [NFC][AArch64] Regenerate regression tests.

Eli Friedman via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 27 17:08:41 PDT 2024


Author: Eli Friedman
Date: 2024-03-27T17:08:02-07:00
New Revision: 036e7ee9d1f1bdc194b56302f8dd27d5eb6cfdab

URL: https://github.com/llvm/llvm-project/commit/036e7ee9d1f1bdc194b56302f8dd27d5eb6cfdab
DIFF: https://github.com/llvm/llvm-project/commit/036e7ee9d1f1bdc194b56302f8dd27d5eb6cfdab.diff

LOG: [NFC][AArch64] Regenerate regression tests.
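    
    (Context, not part of the commit message: check lines like these come from
    LLVM's regeneration scripts rather than hand edits. A plausible invocation
    is sketched below; both scripts read the RUN lines already present in each
    test and rewrite the CHECK lines from the tool's current output, so the
    exact flags may vary.)
    
        # Sketch; run from the llvm-project root. Exact options depend on
        # each test's RUN lines.
        python llvm/utils/update_mir_test_checks.py \
            llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
        python llvm/utils/update_llc_test_checks.py \
            llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll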

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
    llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir b/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
index 0cf9602adbb094..499c08fa4966f9 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir
@@ -40,11 +40,12 @@ body:             |
 
     ; CHECK-LABEL: name: ldrxrox_breg_oreg
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
-    ; CHECK: $x0 = COPY [[LDRXroX]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
+    ; CHECK-NEXT: $x0 = COPY [[LDRXroX]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = COPY $x1
     %2:gpr(p0) = G_PTR_ADD %0, %1
@@ -65,11 +66,12 @@ body:             |
     liveins: $d0, $x1
     ; CHECK-LABEL: name: ldrdrox_breg_oreg
     ; CHECK: liveins: $d0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
-    ; CHECK: $d0 = COPY [[LDRDroX]]
-    ; CHECK: RET_ReallyLR implicit $d0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY]], [[COPY1]], 0, 0 :: (load (s64) from %ir.addr)
+    ; CHECK-NEXT: $d0 = COPY [[LDRDroX]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d0
     %0:gpr(p0) = COPY $d0
     %1:gpr(s64) = COPY $x1
     %2:gpr(p0) = G_PTR_ADD %0, %1
@@ -78,6 +80,9 @@ body:             |
     RET_ReallyLR implicit $d0
 ...
 ---
+# This shouldn't be folded, since we reuse the result of the G_PTR_ADD outside
+# the G_LOAD
+
 name:            more_than_one_use
 alignment:       4
 legalized:       true
@@ -87,18 +92,17 @@ machineFunctionInfo: {}
 body:             |
   bb.0:
     liveins: $x0, $x1
-    ; This shouldn't be folded, since we reuse the result of the G_PTR_ADD outside
-    ; the G_LOAD
     ; CHECK-LABEL: name: more_than_one_use
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY]], [[COPY1]]
-    ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
-    ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[LDRXui]]
-    ; CHECK: $x0 = COPY [[ADDXrr1]]
-    ; CHECK: RET_ReallyLR implicit $x0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY]], [[COPY1]]
+    ; CHECK-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
+    ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[LDRXui]]
+    ; CHECK-NEXT: $x0 = COPY [[ADDXrr1]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
     %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = COPY $x1
     %2:gpr(p0) = G_PTR_ADD %0, %1
@@ -121,11 +125,12 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: ldrxrox_shl
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
-    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
-    ; CHECK: $x2 = COPY [[LDRXroX]]
-    ; CHECK: RET_ReallyLR implicit $x2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+    ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+    ; CHECK-NEXT: $x2 = COPY [[LDRXroX]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x2
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 3
     %2:gpr(s64) = G_SHL %0, %1(s64)
@@ -148,11 +153,12 @@ body:             |
     liveins: $x0, $x1, $d2
     ; CHECK-LABEL: name: ldrdrox_shl
     ; CHECK: liveins: $x0, $x1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
-    ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
-    ; CHECK: $d2 = COPY [[LDRDroX]]
-    ; CHECK: RET_ReallyLR implicit $d2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+    ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+    ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d2
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 3
     %2:gpr(s64) = G_SHL %0, %1(s64)
@@ -175,11 +181,12 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: ldrxrox_mul_rhs
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
-    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
-    ; CHECK: $x2 = COPY [[LDRXroX]]
-    ; CHECK: RET_ReallyLR implicit $x2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+    ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+    ; CHECK-NEXT: $x2 = COPY [[LDRXroX]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x2
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 8
     %2:gpr(s64) = G_MUL %0, %1(s64)
@@ -202,11 +209,12 @@ body:             |
     liveins: $x0, $x1, $d2
     ; CHECK-LABEL: name: ldrdrox_mul_rhs
     ; CHECK: liveins: $x0, $x1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
-    ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
-    ; CHECK: $d2 = COPY [[LDRDroX]]
-    ; CHECK: RET_ReallyLR implicit $d2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+    ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+    ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d2
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 8
     %2:gpr(s64) = G_MUL %0, %1(s64)
@@ -229,11 +237,12 @@ body:             |
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: ldrxrox_mul_lhs
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
-    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
-    ; CHECK: $x2 = COPY [[LDRXroX]]
-    ; CHECK: RET_ReallyLR implicit $x2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+    ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+    ; CHECK-NEXT: $x2 = COPY [[LDRXroX]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x2
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 8
     %2:gpr(s64) = G_MUL %1, %0(s64)
@@ -256,11 +265,12 @@ body:             |
     liveins: $x0, $x1, $d2
     ; CHECK-LABEL: name: ldrdrox_mul_lhs
     ; CHECK: liveins: $x0, $x1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
-    ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
-    ; CHECK: $d2 = COPY [[LDRDroX]]
-    ; CHECK: RET_ReallyLR implicit $d2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+    ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+    ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d2
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 8
     %2:gpr(s64) = G_MUL %1, %0(s64)
@@ -272,6 +282,9 @@ body:             |
 
 ...
 ---
+# Show that we don't get a shifted load from a mul when we don't have a
+# power of 2. (The bit isn't set on the load.)
+
 name:            mul_not_pow_2
 alignment:       4
 legalized:       true
@@ -280,19 +293,18 @@ tracksRegLiveness: true
 machineFunctionInfo: {}
 body:             |
   bb.0:
-    ; Show that we don't get a shifted load from a mul when we don't have a
-    ; power of 2. (The bit isn't set on the load.)
     liveins: $x0, $x1, $d2
     ; CHECK-LABEL: name: mul_not_pow_2
     ; CHECK: liveins: $x0, $x1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 7
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
-    ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
-    ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
-    ; CHECK: $d2 = COPY [[LDRDroX]]
-    ; CHECK: RET_ReallyLR implicit $d2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 7
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
+    ; CHECK-NEXT: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+    ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
+    ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d2
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 7
     %2:gpr(s64) = G_MUL %1, %0(s64)
@@ -304,6 +316,9 @@ body:             |
 
 ...
 ---
+# Show that we don't get a shifted load from a mul when we don't have
+# the right power of 2. (The bit isn't set on the load.)
+
 name:            mul_wrong_pow_2
 alignment:       4
 legalized:       true
@@ -312,19 +327,18 @@ tracksRegLiveness: true
 machineFunctionInfo: {}
 body:             |
   bb.0:
-    ; Show that we don't get a shifted load from a mul when we don't have
-    ; the right power of 2. (The bit isn't set on the load.)
     liveins: $x0, $x1, $d2
     ; CHECK-LABEL: name: mul_wrong_pow_2
     ; CHECK: liveins: $x0, $x1, $d2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 16
-    ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
-    ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
-    ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
-    ; CHECK: $d2 = COPY [[LDRDroX]]
-    ; CHECK: RET_ReallyLR implicit $d2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 16
+    ; CHECK-NEXT: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
+    ; CHECK-NEXT: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+    ; CHECK-NEXT: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load (s64) from %ir.addr)
+    ; CHECK-NEXT: $d2 = COPY [[LDRDroX]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $d2
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 16
     %2:gpr(s64) = G_MUL %1, %0(s64)
@@ -336,6 +350,9 @@ body:             |
 
 ...
 ---
+# Show that we can still fall back to the register-register addressing
+# mode when we fail to pull in the shift.
+
 name:            more_than_one_use_shl_1
 alignment:       4
 legalized:       true
@@ -344,19 +361,18 @@ tracksRegLiveness: true
 machineFunctionInfo: {}
 body:             |
   bb.0:
-    ; Show that we can still fall back to the register-register addressing
-    ; mode when we fail to pull in the shift.
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: more_than_one_use_shl_1
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
-    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[UBFMXri]], 0, 0 :: (load (s64) from %ir.addr)
-    ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
-    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
-    ; CHECK: $x2 = COPY [[ADDXrr]]
-    ; CHECK: RET_ReallyLR implicit $x2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+    ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[UBFMXri]], 0, 0 :: (load (s64) from %ir.addr)
+    ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
+    ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
+    ; CHECK-NEXT: $x2 = COPY [[ADDXrr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x2
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 3
     %2:gpr(s64) = G_SHL %0, %1(s64)
@@ -370,6 +386,9 @@ body:             |
 
 ...
 ---
+# Show that when the GEP is used outside a memory op, we don't do any
+# folding at all.
+
 name:            more_than_one_use_shl_2
 alignment:       4
 legalized:       true
@@ -378,22 +397,21 @@ tracksRegLiveness: true
 machineFunctionInfo: {}
 body:             |
   bb.0:
-    ; Show that when the GEP is used outside a memory op, we don't do any
-    ; folding at all.
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: more_than_one_use_shl_2
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
-    ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
-    ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
-    ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[ADDXri]]
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
-    ; CHECK: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[ADDXrr1]]
-    ; CHECK: $x2 = COPY [[ADDXrr2]]
-    ; CHECK: RET_ReallyLR implicit $x2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64common = ADDXrr [[COPY1]], [[UBFMXri]]
+    ; CHECK-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrr]], 0 :: (load (s64) from %ir.addr)
+    ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
+    ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[ADDXri]]
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[ADDXrr]]
+    ; CHECK-NEXT: [[ADDXrr2:%[0-9]+]]:gpr64 = ADDXrr [[COPY2]], [[ADDXrr1]]
+    ; CHECK-NEXT: $x2 = COPY [[ADDXrr2]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x2
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 3
     %2:gpr(s64) = G_SHL %0, %1(s64)
@@ -409,6 +427,9 @@ body:             |
 
 ...
 ---
+# Show that when we have a fastpath for shift-left, we perform the folding
+# if it has more than one use.
+
 name:            more_than_one_use_shl_lsl_fast
 alignment:       4
 legalized:       true
@@ -417,18 +438,17 @@ tracksRegLiveness: true
 machineFunctionInfo: {}
 body:             |
   bb.0:
-    ; Show that when we have a fastpath for shift-left, we perform the folding
-    ; if it has more than one use.
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: more_than_one_use_shl_lsl_fast
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
-    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
-    ; CHECK: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
-    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
-    ; CHECK: $x2 = COPY [[ADDXrr]]
-    ; CHECK: RET_ReallyLR implicit $x2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1
+    ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+    ; CHECK-NEXT: [[LDRXroX1:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+    ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[LDRXroX1]]
+    ; CHECK-NEXT: $x2 = COPY [[ADDXrr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x2
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 3
     %2:gpr(s64) = G_SHL %0, %1(s64)
@@ -442,6 +462,9 @@ body:             |
 
 ...
 ---
+# Show that we don't fold into multiple memory ops when we don't have a
+# fastpath for shift-left.
+
 name:            more_than_one_use_shl_lsl_slow
 alignment:       4
 legalized:       true
@@ -450,19 +473,18 @@ tracksRegLiveness: true
 machineFunctionInfo: {}
 body:             |
   bb.0:
-    ; Show that we don't fold into multiple memory ops when we don't have a
-    ; fastpath for shift-left.
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: more_than_one_use_shl_lsl_slow
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[ADDXrs:%[0-9]+]]:gpr64common = ADDXrs [[COPY1]], [[COPY]], 3
-    ; CHECK: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
-    ; CHECK: [[LDRXui1:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
-    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[LDRXui1]]
-    ; CHECK: $x2 = COPY [[ADDXrr]]
-    ; CHECK: RET_ReallyLR implicit $x2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[ADDXrs:%[0-9]+]]:gpr64common = ADDXrs [[COPY1]], [[COPY]], 3
+    ; CHECK-NEXT: [[LDRXui:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
+    ; CHECK-NEXT: [[LDRXui1:%[0-9]+]]:gpr64 = LDRXui [[ADDXrs]], 0 :: (load (s64) from %ir.addr)
+    ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXui]], [[LDRXui1]]
+    ; CHECK-NEXT: $x2 = COPY [[ADDXrr]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x2
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 3
     %2:gpr(s64) = G_SHL %0, %1(s64)
@@ -476,6 +498,9 @@ body:             |
 
 ...
 ---
+# Show that when we're optimizing for size, we'll do the folding no matter
+# what.
+
 name:            more_than_one_use_shl_minsize
 alignment:       4
 legalized:       true
@@ -484,22 +509,21 @@ tracksRegLiveness: true
 machineFunctionInfo: {}
 body:             |
   bb.0:
-    ; Show that when we're optimizing for size, we'll do the folding no matter
-    ; what.
     liveins: $x0, $x1, $x2
     ; CHECK-LABEL: name: more_than_one_use_shl_minsize
     ; CHECK: liveins: $x0, $x1, $x2
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
-    ; CHECK: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
-    ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
-    ; CHECK: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs [[COPY2]], [[COPY]], 3
-    ; CHECK: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
-    ; CHECK: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
-    ; CHECK: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
-    ; CHECK: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrs]], [[ADDXrr]]
-    ; CHECK: $x2 = COPY [[ADDXrr1]]
-    ; CHECK: RET_ReallyLR implicit $x2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64 = COPY $x0
+    ; CHECK-NEXT: [[UBFMXri:%[0-9]+]]:gpr64common = UBFMXri [[COPY]], 61, 60
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64common = COPY $x1
+    ; CHECK-NEXT: [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY1]]
+    ; CHECK-NEXT: [[ADDXrs:%[0-9]+]]:gpr64 = ADDXrs [[COPY2]], [[COPY]], 3
+    ; CHECK-NEXT: [[LDRXroX:%[0-9]+]]:gpr64 = LDRXroX [[COPY1]], [[COPY]], 0, 1 :: (load (s64) from %ir.addr)
+    ; CHECK-NEXT: [[ADDXri:%[0-9]+]]:gpr64common = ADDXri [[UBFMXri]], 3, 0
+    ; CHECK-NEXT: [[ADDXrr:%[0-9]+]]:gpr64 = ADDXrr [[LDRXroX]], [[ADDXri]]
+    ; CHECK-NEXT: [[ADDXrr1:%[0-9]+]]:gpr64 = ADDXrr [[ADDXrs]], [[ADDXrr]]
+    ; CHECK-NEXT: $x2 = COPY [[ADDXrr1]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $x2
     %0:gpr(s64) = COPY $x0
     %1:gpr(s64) = G_CONSTANT i64 3
     %2:gpr(s64) = G_SHL %0, %1(s64)
@@ -525,11 +549,12 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: ldrwrox
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
-    ; CHECK: $w2 = COPY [[LDRWroX]]
-    ; CHECK: RET_ReallyLR implicit $w2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[LDRWroX:%[0-9]+]]:gpr32 = LDRWroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
+    ; CHECK-NEXT: $w2 = COPY [[LDRWroX]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w2
     %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = COPY $x1
     %2:gpr(p0) = G_PTR_ADD %0, %1
@@ -549,11 +574,12 @@ body:             |
     liveins: $d0, $x1
     ; CHECK-LABEL: name: ldrsrox
     ; CHECK: liveins: $d0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
-    ; CHECK: $s2 = COPY [[LDRSroX]]
-    ; CHECK: RET_ReallyLR implicit $h2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[LDRSroX:%[0-9]+]]:fpr32 = LDRSroX [[COPY]], [[COPY1]], 0, 0 :: (load (s32) from %ir.addr)
+    ; CHECK-NEXT: $s2 = COPY [[LDRSroX]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $h2
     %0:gpr(p0) = COPY $d0
     %1:gpr(s64) = COPY $x1
     %2:gpr(p0) = G_PTR_ADD %0, %1
@@ -573,11 +599,12 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: ldrhrox
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[LDRHroX:%[0-9]+]]:fpr16 = LDRHroX [[COPY]], [[COPY1]], 0, 0 :: (load (s16) from %ir.addr)
-    ; CHECK: $h2 = COPY [[LDRHroX]]
-    ; CHECK: RET_ReallyLR implicit $h2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[LDRHroX:%[0-9]+]]:fpr16 = LDRHroX [[COPY]], [[COPY1]], 0, 0 :: (load (s16) from %ir.addr)
+    ; CHECK-NEXT: $h2 = COPY [[LDRHroX]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $h2
     %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = COPY $x1
     %2:gpr(p0) = G_PTR_ADD %0, %1
@@ -597,11 +624,12 @@ body:             |
     liveins: $x0, $x1
     ; CHECK-LABEL: name: ldbbrox
     ; CHECK: liveins: $x0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[LDRBBroX:%[0-9]+]]:gpr32 = LDRBBroX [[COPY]], [[COPY1]], 0, 0 :: (load (s8) from %ir.addr)
-    ; CHECK: $w2 = COPY [[LDRBBroX]]
-    ; CHECK: RET_ReallyLR implicit $w2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[LDRBBroX:%[0-9]+]]:gpr32 = LDRBBroX [[COPY]], [[COPY1]], 0, 0 :: (load (s8) from %ir.addr)
+    ; CHECK-NEXT: $w2 = COPY [[LDRBBroX]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $w2
     %0:gpr(p0) = COPY $x0
     %1:gpr(s64) = COPY $x1
     %2:gpr(p0) = G_PTR_ADD %0, %1
@@ -621,11 +649,12 @@ body:             |
     liveins: $d0, $x1
     ; CHECK-LABEL: name: ldrqrox
     ; CHECK: liveins: $d0, $x1
-    ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
-    ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
-    ; CHECK: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY]], [[COPY1]], 0, 0 :: (load (<2 x s64>) from %ir.addr)
-    ; CHECK: $q0 = COPY [[LDRQroX]]
-    ; CHECK: RET_ReallyLR implicit $q0
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $d0
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1
+    ; CHECK-NEXT: [[LDRQroX:%[0-9]+]]:fpr128 = LDRQroX [[COPY]], [[COPY1]], 0, 0 :: (load (<2 x s64>) from %ir.addr)
+    ; CHECK-NEXT: $q0 = COPY [[LDRQroX]]
+    ; CHECK-NEXT: RET_ReallyLR implicit $q0
     %0:gpr(p0) = COPY $d0
     %1:gpr(s64) = COPY $x1
     %2:gpr(p0) = G_PTR_ADD %0, %1

diff --git a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll b/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
index cf9ed4d5f0e16a..573f921e638cf8 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-split-and-bitmask-immediate.ll
@@ -20,7 +20,7 @@ entry:
 define i8 @test2(i32 %a) {
 ; CHECK-LABEL: test2:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #135
+; CHECK-NEXT:    mov w8, #135 // =0x87
 ; CHECK-NEXT:    and w8, w0, w8
 ; CHECK-NEXT:    cmp w8, #1024
 ; CHECK-NEXT:    cset w0, eq
@@ -37,7 +37,7 @@ entry:
 define i8 @test3(i32 %a) {
 ; CHECK-LABEL: test3:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #1024
+; CHECK-NEXT:    mov w8, #1024 // =0x400
 ; CHECK-NEXT:    movk w8, #33, lsl #16
 ; CHECK-NEXT:    and w8, w0, w8
 ; CHECK-NEXT:    cmp w8, #1024
@@ -84,7 +84,7 @@ entry:
 define i8 @test6(i64 %a) {
 ; CHECK-LABEL: test6:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #135
+; CHECK-NEXT:    mov w8, #135 // =0x87
 ; CHECK-NEXT:    and x8, x0, x8
 ; CHECK-NEXT:    cmp x8, #1024
 ; CHECK-NEXT:    cset w0, eq
@@ -101,7 +101,7 @@ entry:
 define i8 @test7(i64 %a) {
 ; CHECK-LABEL: test7:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #1024
+; CHECK-NEXT:    mov w8, #1024 // =0x400
 ; CHECK-NEXT:    movk w8, #33, lsl #16
 ; CHECK-NEXT:    and x8, x0, x8
 ; CHECK-NEXT:    cmp x8, #1024
@@ -175,7 +175,7 @@ define i32 @test9(ptr nocapture %x, ptr nocapture readonly %y, i32 %n) {
 ; CHECK-NEXT:    cmp w2, #1
 ; CHECK-NEXT:    b.lt .LBB8_3
 ; CHECK-NEXT:  // %bb.1: // %for.body.preheader
-; CHECK-NEXT:    mov w9, #1024
+; CHECK-NEXT:    mov w9, #1024 // =0x400
 ; CHECK-NEXT:    mov w8, w2
 ; CHECK-NEXT:    movk w9, #32, lsl #16
 ; CHECK-NEXT:  .LBB8_2: // %for.body
@@ -226,7 +226,7 @@ define void @test10(ptr nocapture %x, ptr nocapture readonly %y, ptr nocapture %
 ; CHECK-LABEL: test10:
 ; CHECK:       // %bb.0: // %entry
 ; CHECK-NEXT:    ldr w8, [x1]
-; CHECK-NEXT:    mov w9, #1024
+; CHECK-NEXT:    mov w9, #1024 // =0x400
 ; CHECK-NEXT:    movk w9, #32, lsl #16
 ; CHECK-NEXT:    and w8, w8, w9
 ; CHECK-NEXT:    str w8, [x0]
@@ -253,7 +253,7 @@ entry:
 define i8 @test11(i64 %a) {
 ; CHECK-LABEL: test11:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    mov w8, #-1610612736
+; CHECK-NEXT:    mov w8, #-1610612736 // =0xa0000000
 ; CHECK-NEXT:    and x8, x0, x8
 ; CHECK-NEXT:    cmp x8, #1024
 ; CHECK-NEXT:    cset w0, eq
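
(Two idioms recur in the regenerated output, for readers skimming the diff.
In the MIR test, the script switches each body to a strict CHECK-NEXT chain
and emits `{{  $}}`, a FileCheck regex that matches the two-space "blank"
line the MIR printer leaves after the liveins, so every following line is
matched exactly in order; for example, from the first hunk above:

    ; CHECK: liveins: $x0, $x1
    ; CHECK-NEXT: {{  $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0

In the .ll test, the only change is that recent llc annotates mov immediates
with an assembly comment giving the hex value, e.g. `mov w8, #135 // =0x87`,
and the regenerated CHECK lines now capture that comment.)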
