[llvm] 5fb979d - [llvm][test] Add missing FileCheck colons. NFC

Jon Roelofs via llvm-commits <llvm-commits at lists.llvm.org>
Thu May 21 08:29:58 PDT 2020


Author: Jon Roelofs
Date: 2020-05-21T09:29:27-06:00
New Revision: 5fb979dd0697c1a88131bc8709a0928ef4990195

URL: https://github.com/llvm/llvm-project/commit/5fb979dd0697c1a88131bc8709a0928ef4990195
DIFF: https://github.com/llvm/llvm-project/commit/5fb979dd0697c1a88131bc8709a0928ef4990195.diff

LOG: [llvm][test] Add missing FileCheck colons. NFC
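
FileCheck recognizes a directive only when the colon immediately follows
the check prefix: a line like "; CHECK-NEXT : ret" is ordinary comment
text as far as FileCheck is concerned, so the ret instructions in these
tests were silently never checked. A minimal sketch of the failure mode
(not taken from this commit; function name and RUN line are illustrative):

    ; RUN: llc -mtriple=aarch64-none-linux-gnu -asm-verbose=0 < %s | FileCheck %s
    define void @foo() {
    ; CHECK-LABEL: foo:
    ;; Not a directive (space before the colon): FileCheck skips it,
    ;; so even a bogus pattern here goes unnoticed and the test passes.
    ; CHECK-NEXT : this-is-never-checked
    ;; A real directive: this one is matched against llc's output.
    ; CHECK-NEXT: ret
      ret void
    }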

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/sve-intrinsics-matmul-fp32.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-matmul-fp64.ll
    llvm/test/CodeGen/AArch64/sve-intrinsics-matmul-int8.ll
    llvm/test/Transforms/LoopVectorize/ARM/tail-folding-counting-down.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-matmul-fp32.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-matmul-fp32.ll
index 6486b1596d1e..add3622ebf7e 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-matmul-fp32.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-matmul-fp32.ll
@@ -4,7 +4,7 @@ define <vscale x 4 x float> @fmmla_s(<vscale x 4 x float> %r, <vscale x 4 x floa
 entry:
 ; CHECK-LABEL: fmmla_s:
 ; CHECK-NEXT:  fmmla   z0.s, z1.s, z2.s
-; CHECK-NEXT : ret
+; CHECK-NEXT:  ret
   %val = tail call <vscale x 4 x float> @llvm.aarch64.sve.fmmla.nxv4f32(<vscale x 4 x float> %r, <vscale x 4 x float> %a, <vscale x 4 x float> %b)
   ret <vscale x 4 x float> %val
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-matmul-fp64.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-matmul-fp64.ll
index 9f6ff187e0c5..8315e3023a6e 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-matmul-fp64.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-matmul-fp64.ll
@@ -5,7 +5,7 @@ define <vscale x 2 x double> @fmmla_d(<vscale x 2 x double> %r, <vscale x 2 x do
 entry:
 ; CHECK-LABEL: fmmla_d:
 ; CHECK-NEXT:  fmmla   z0.d, z1.d, z2.d
-; CHECK-NEXT : ret
+; CHECK-NEXT:  ret
   %val = tail call <vscale x 2 x double> @llvm.aarch64.sve.fmmla.nxv2f64(<vscale x 2 x double> %r, <vscale x 2 x double> %a, <vscale x 2 x double> %b)
   ret <vscale x 2 x double> %val
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-matmul-int8.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-matmul-int8.ll
index 6febb71e7db0..2d672c86cb6a 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-matmul-int8.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-matmul-int8.ll
@@ -22,7 +22,7 @@ define <vscale x 4 x i32> @usmmla(<vscale x 4 x i32> %r, <vscale x 16 x i8> %a,
 entry:
 ; CHECK-LABEL: usmmla:
 ; CHECK-NEXT:  usmmla   z0.s, z1.b, z2.b
-; CHECK-NEXT : ret
+; CHECK-NEXT:  ret
   %val = tail call <vscale x 4 x i32> @llvm.aarch64.sve.usmmla.nxv4i32(<vscale x 4 x i32> %r, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
   ret <vscale x 4 x i32> %val
 }
@@ -31,7 +31,7 @@ define <vscale x 4 x i32> @usdot(<vscale x 4 x i32> %r, <vscale x 16 x i8> %a, <
 entry:
 ; CHECK-LABEL: usdot:
 ; CHECK-NEXT:  usdot   z0.s, z1.b, z2.b
-; CHECK-NEXT : ret
+; CHECK-NEXT:  ret
   %val = tail call <vscale x 4 x i32> @llvm.aarch64.sve.usdot.nxv4i32(<vscale x 4 x i32> %r, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
   ret <vscale x 4 x i32> %val
 }
@@ -40,7 +40,7 @@ define <vscale x 4 x i32> @usdot_lane_0(<vscale x 4 x i32> %r, <vscale x 16 x i8
 entry:
 ; CHECK-LABEL: usdot_lane_0:
 ; CHECK-NEXT:  usdot   z0.s, z1.b, z2.b[0]
-; CHECK-NEXT : ret
+; CHECK-NEXT:  ret
   %val = tail call <vscale x 4 x i32> @llvm.aarch64.sve.usdot.lane.nxv4i32(<vscale x 4 x i32> %r, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 0)
   ret <vscale x 4 x i32> %val
 }
@@ -49,7 +49,7 @@ define <vscale x 4 x i32> @usdot_lane_1(<vscale x 4 x i32> %r, <vscale x 16 x i8
 entry:
 ; CHECK-LABEL: usdot_lane_1:
 ; CHECK-NEXT:  usdot   z0.s, z1.b, z2.b[1]
-; CHECK-NEXT : ret
+; CHECK-NEXT:  ret
   %val = tail call <vscale x 4 x i32> @llvm.aarch64.sve.usdot.lane.nxv4i32(<vscale x 4 x i32> %r, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 1)
   ret <vscale x 4 x i32> %val
 }
@@ -58,7 +58,7 @@ define <vscale x 4 x i32> @usdot_lane_2(<vscale x 4 x i32> %r, <vscale x 16 x i8
 entry:
 ; CHECK-LABEL: usdot_lane_2:
 ; CHECK-NEXT:  usdot   z0.s, z1.b, z2.b[2]
-; CHECK-NEXT : ret
+; CHECK-NEXT:  ret
   %val = tail call <vscale x 4 x i32> @llvm.aarch64.sve.usdot.lane.nxv4i32(<vscale x 4 x i32> %r, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 2)
   ret <vscale x 4 x i32> %val
 }
@@ -67,7 +67,7 @@ define <vscale x 4 x i32> @usdot_lane_3(<vscale x 4 x i32> %r, <vscale x 16 x i8
 entry:
 ; CHECK-LABEL: usdot_lane_3:
 ; CHECK-NEXT:  usdot   z0.s, z1.b, z2.b[3]
-; CHECK-NEXT : ret
+; CHECK-NEXT:  ret
   %val = tail call <vscale x 4 x i32> @llvm.aarch64.sve.usdot.lane.nxv4i32(<vscale x 4 x i32> %r, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 3)
   ret <vscale x 4 x i32> %val
 }
@@ -76,7 +76,7 @@ define <vscale x 4 x i32> @sudot_lane_0(<vscale x 4 x i32> %r, <vscale x 16 x i8
 entry:
 ; CHECK-LABEL: sudot_lane_0:
 ; CHECK-NEXT:  sudot   z0.s, z1.b, z2.b[0]
-; CHECK-NEXT : ret
+; CHECK-NEXT:  ret
   %val = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sudot.lane.nxv4i32(<vscale x 4 x i32> %r, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 0)
   ret <vscale x 4 x i32> %val
 }
@@ -85,7 +85,7 @@ define <vscale x 4 x i32> @sudot_lane_1(<vscale x 4 x i32> %r, <vscale x 16 x i8
 entry:
 ; CHECK-LABEL: sudot_lane_1:
 ; CHECK-NEXT:  sudot   z0.s, z1.b, z2.b[1]
-; CHECK-NEXT : ret
+; CHECK-NEXT:  ret
   %val = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sudot.lane.nxv4i32(<vscale x 4 x i32> %r, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 1)
   ret <vscale x 4 x i32> %val
 }
@@ -94,7 +94,7 @@ define <vscale x 4 x i32> @sudot_lane_2(<vscale x 4 x i32> %r, <vscale x 16 x i8
 entry:
 ; CHECK-LABEL: sudot_lane_2:
 ; CHECK-NEXT:  sudot   z0.s, z1.b, z2.b[2]
-; CHECK-NEXT : ret
+; CHECK-NEXT:  ret
   %val = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sudot.lane.nxv4i32(<vscale x 4 x i32> %r, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 2)
   ret <vscale x 4 x i32> %val
 }
@@ -103,7 +103,7 @@ define <vscale x 4 x i32> @sudot_lane_3(<vscale x 4 x i32> %r, <vscale x 16 x i8
 entry:
 ; CHECK-LABEL: sudot_lane_3:
 ; CHECK-NEXT:  sudot   z0.s, z1.b, z2.b[3]
-; CHECK-NEXT : ret
+; CHECK-NEXT:  ret
   %val = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sudot.lane.nxv4i32(<vscale x 4 x i32> %r, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, i32 3)
   ret <vscale x 4 x i32> %val
 }

diff --git a/llvm/test/Transforms/LoopVectorize/ARM/tail-folding-counting-down.ll b/llvm/test/Transforms/LoopVectorize/ARM/tail-folding-counting-down.ll
index 5fe68bf014cb..274b626c8d91 100644
--- a/llvm/test/Transforms/LoopVectorize/ARM/tail-folding-counting-down.ll
+++ b/llvm/test/Transforms/LoopVectorize/ARM/tail-folding-counting-down.ll
@@ -292,7 +292,7 @@ while.end:
 ;
 define dso_local void @sgt_for_loop(i8* noalias nocapture readonly %a, i8* noalias nocapture readonly %b, i8* noalias nocapture %c, i32 %N) local_unnamed_addr #0 {
 ; COMMON-LABEL: @sgt_for_loop(
-; COMMON :      vector.body:
+; COMMON:       vector.body:
 ; CHECK-PREFER: masked.load
 ; CHECK-PREFER: masked.load
 ; CHECK-PREFER: masked.store


        

