[llvm] 261c56f - [NFC][Codegen] Tune a few tests to not end with a naked `unreachable` terminator

Roman Lebedev via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 2 13:34:34 PDT 2021


Author: Roman Lebedev
Date: 2021-07-02T23:33:30+03:00
New Revision: 261c56f80b1e4d9a4e08094faf79e9296f1939d1

URL: https://github.com/llvm/llvm-project/commit/261c56f80b1e4d9a4e08094faf79e9296f1939d1
DIFF: https://github.com/llvm/llvm-project/commit/261c56f80b1e4d9a4e08094faf79e9296f1939d1.diff

LOG: [NFC][Codegen] Tune a few tests to not end with a naked `unreachable` terminator

These rely on the fact that currently simplifycfg won't really propagate
said `unreachable`, but that is about to change.

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/pr33172.ll
    llvm/test/CodeGen/ARM/Windows/memset.ll
    llvm/test/CodeGen/ARM/machine-cse-cmp.ll
    llvm/test/CodeGen/ARM/memfunc.ll
    llvm/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
    llvm/test/CodeGen/Hexagon/reg-scavengebug.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/pr33172.ll b/llvm/test/CodeGen/AArch64/pr33172.ll
index 098d5358b02d0..e1b4cdc6603c9 100644
--- a/llvm/test/CodeGen/AArch64/pr33172.ll
+++ b/llvm/test/CodeGen/AArch64/pr33172.ll
@@ -22,7 +22,7 @@ entry:
   store i64 %wide.load8281058.4, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.x, i64 0, i64 16) to i64*), align 8
   store i64 %wide.load8291059.4, i64* bitcast (float* getelementptr inbounds ([200 x float], [200 x float]* @main.x, i64 0, i64 18) to i64*), align 8
   tail call void @llvm.memset.p0i8.i64(i8* align 8 bitcast ([200 x float]* @main.b to i8*), i8 0, i64 undef, i1 false) #2
-  unreachable
+  ret void
 }
 
 ; Function Attrs: argmemonly nounwind

diff --git a/llvm/test/CodeGen/ARM/Windows/memset.ll b/llvm/test/CodeGen/ARM/Windows/memset.ll
index 8cb257c156606..d4d918a29c14b 100644
--- a/llvm/test/CodeGen/ARM/Windows/memset.ll
+++ b/llvm/test/CodeGen/ARM/Windows/memset.ll
@@ -7,7 +7,7 @@ declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
 define void @function() {
 entry:
   call void @llvm.memset.p0i8.i32(i8* bitcast ([512 x i8]* @source to i8*), i8 0, i32 512, i1 false)
-  unreachable
+  ret void
 }
 
 ; CHECK: movs r1, #0

diff --git a/llvm/test/CodeGen/ARM/machine-cse-cmp.ll b/llvm/test/CodeGen/ARM/machine-cse-cmp.ll
index 49dbb03135f5a..ab5f58c27e768 100644
--- a/llvm/test/CodeGen/ARM/machine-cse-cmp.ll
+++ b/llvm/test/CodeGen/ARM/machine-cse-cmp.ll
@@ -1,6 +1,8 @@
 ; RUN: llc < %s -mtriple=armv7-apple-ios | FileCheck %s
 ;rdar://8003725
 
+declare void @llvm.trap()
+
 @G1 = external global i32
 @G2 = external global i32
 
@@ -38,6 +40,7 @@ for.body.lr.ph:                                   ; preds = %entry
   %1 = icmp sgt i32 %0, 1
   %smax = select i1 %1, i32 %0, i32 1
   call void @llvm.memset.p0i8.i32(i8* getelementptr inbounds ([250 x i8], [250 x i8]* @bar, i32 0, i32 0), i8 0, i32 %smax, i1 false)
+  call void @llvm.trap()
   unreachable
 
 for.cond1.preheader:                              ; preds = %entry

diff --git a/llvm/test/CodeGen/ARM/memfunc.ll b/llvm/test/CodeGen/ARM/memfunc.ll
index 0fe1f630c57a8..217b88a32de06 100644
--- a/llvm/test/CodeGen/ARM/memfunc.ll
+++ b/llvm/test/CodeGen/ARM/memfunc.ll
@@ -94,7 +94,7 @@ entry:
   ; CHECK-GNUEABI: bl memset
   call void @llvm.memset.p0i8.i32(i8* align 8 %dest, i8 0, i32 500, i1 false)
 
-  unreachable
+  ret void
 }
 
 ; Check that alloca arguments to memory intrinsics are automatically aligned if at least 8 bytes in size
@@ -140,7 +140,7 @@ entry:
   %2 = bitcast [9 x i8]* %arr2 to i8*
   call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false)
 
-  unreachable
+  ret void
 }
 
 ; Check that alloca arguments are not aligned if less than 8 bytes in size
@@ -179,7 +179,7 @@ entry:
   %2 = bitcast [7 x i8]* %arr2 to i8*
   call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false)
 
-  unreachable
+  ret void
 }
 
 ; Check that alloca arguments are not aligned if size+offset is less than 8 bytes
@@ -218,7 +218,7 @@ entry:
   %2 = getelementptr inbounds [9 x i8], [9 x i8]* %arr2, i32 0, i32 4
   call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false)
 
-  unreachable
+  ret void
 }
 
 ; Check that alloca arguments are not aligned if the offset is not a multiple of 4
@@ -257,7 +257,7 @@ entry:
   %2 = getelementptr inbounds [13 x i8], [13 x i8]* %arr2, i32 0, i32 1
   call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false)
 
-  unreachable
+  ret void
 }
 
 ; Check that alloca arguments are not aligned if the offset is unknown
@@ -296,7 +296,7 @@ entry:
   %2 = getelementptr inbounds [13 x i8], [13 x i8]* %arr2, i32 0, i32 %i
   call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false)
 
-  unreachable
+  ret void
 }
 
 ; Check that alloca arguments are not aligned if the GEP is not inbounds
@@ -335,7 +335,7 @@ entry:
   %2 = getelementptr [13 x i8], [13 x i8]* %arr2, i32 0, i32 4
   call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false)
 
-  unreachable
+  ret void
 }
 
 ; Check that alloca arguments are not aligned when the offset is past the end of the allocation
@@ -374,7 +374,7 @@ entry:
   %2 = getelementptr inbounds [13 x i8], [13 x i8]* %arr2, i32 0, i32 16
   call void @llvm.memset.p0i8.i32(i8* %2, i8 1, i32 %n, i1 false)
 
-  unreachable
+  ret void
 }
 
 ; Check that global variables are aligned if they are large enough, but only if
@@ -401,7 +401,7 @@ entry:
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([128 x i8], [128 x i8]* @arr8, i32 0, i32 0), i32 %n, i1 false)
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([128 x i8], [128 x i8]* @arr9, i32 0, i32 0), i32 %n, i1 false)
   call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([8 x i8], [8 x i8]* @arr10, i32 0, i32 0), i32 %n, i1 false)
-  unreachable
+  ret void
 }
 
 ; CHECK: {{\.data|\.section.+data}}

diff --git a/llvm/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll b/llvm/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
index 777952724ffb9..db56e0a2fafe5 100644
--- a/llvm/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
+++ b/llvm/test/CodeGen/Hexagon/branchfolder-keep-impdef.ll
@@ -19,7 +19,7 @@ b2:                                               ; preds = %b1, %b0
   %t1 = phi i8* [ %t0, %b1 ], [ undef, %b0 ]
   %t2 = getelementptr inbounds i8, i8* %t1, i32 %p0
   tail call void @llvm.memmove.p0i8.p0i8.i32(i8* undef, i8* %t2, i32 undef, i1 false) #1
-  unreachable
+  ret void
 }
 
 declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture readonly, i32, i1) #0

diff --git a/llvm/test/CodeGen/Hexagon/reg-scavengebug.ll b/llvm/test/CodeGen/Hexagon/reg-scavengebug.ll
index d53799bc4d191..b712d1556cea1 100644
--- a/llvm/test/CodeGen/Hexagon/reg-scavengebug.ll
+++ b/llvm/test/CodeGen/Hexagon/reg-scavengebug.ll
@@ -155,10 +155,10 @@ b2:                                               ; preds = %b1
   %v120 = getelementptr <16 x i32>, <16 x i32>* %v2, i32 6
   %v121 = tail call <16 x i32> @llvm.hexagon.V6.vshufoh(<16 x i32> undef, <16 x i32> undef)
   store <16 x i32> %v121, <16 x i32>* %v120, align 64, !tbaa !0
-  unreachable
+  ret void
 
 b3:                                               ; preds = %b1
-  unreachable
+  ret void
 
 b4:                                               ; preds = %b0
   ret void


        


More information about the llvm-commits mailing list