[llvm] r254147 - [mips][ias] Replace anchor comments with anchor instructions in tests.

Daniel Sanders via llvm-commits <llvm-commits at lists.llvm.org>
Thu Nov 26 02:26:24 PST 2015


Author: dsanders
Date: Thu Nov 26 04:26:18 2015
New Revision: 254147

URL: http://llvm.org/viewvc/llvm-project?rev=254147&view=rev
Log:
[mips][ias] Replace anchor comments with anchor instructions in tests.

Summary:
This is because IAS deletes comments, so the anchors would disappear from the
emitted code. NFC at the moment, but it will prevent a failure once IAS is the
default.

Reviewers: vkalintiris

Subscribers: llvm-commits, dsanders

Differential Revision: http://reviews.llvm.org/D14704

Modified:
    llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs.ll
    llvm/trunk/test/CodeGen/Mips/no-odd-spreg-msa.ll
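
For readers unfamiliar with the anchor idiom: these tests use `asm sideeffect`
strings as markers that FileCheck matches in order to split a function body
into regions. A comment-only string like "# ANCHOR1" is dropped when the
integrated assembler (IAS) re-emits the code, whereas a real instruction such
as `teqi $zero, 1` survives. `teqi` (trap if equal immediate) traps only when
the source register equals the immediate, and `$zero` is hardwired to 0, so
`teqi $zero, N` with a non-zero N can never trap; it is effectively a harmless,
uniquely matchable anchor. A minimal sketch of the pattern (a hypothetical
test written for illustration, not taken from this patch; the global @g and
the exact RUN line are assumptions):

    ; RUN: llc -march=mips < %s | FileCheck %s

    @g = global i32 0

    define void @anchored() {
    entry:
      ; A real instruction survives IAS; a comment-only asm string would not.
      ; Note the doubled $$ to escape '$' inside an LLVM inline-asm string.
      call void asm sideeffect "teqi $$zero, 1", ""()
      store volatile i32 42, i32* @g
      ret void
    }

    ; CHECK-LABEL: anchored:
    ; CHECK: teqi $zero, 1
    ; CHECK: sw

The immediate distinguishes multiple anchors within one function, which is why
the patch below uses `teqi $zero, 1` and `teqi $zero, 2` where the old tests
had ANCHOR1 and ANCHOR2.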

Modified: llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs.ll?rev=254147&r1=254146&r2=254147&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/cconv/arguments-varargs.ll Thu Nov 26 04:26:18 2015
@@ -55,7 +55,7 @@ entry:
 ; Store [[VA]]
 ; O32-DAG:       sw [[VA]], 0([[SP]])
 
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
 
 ; Increment [[VA]]
 ; O32-DAG:       lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -89,7 +89,7 @@ entry:
 
 ; ALL-DAG:       sh [[ARG1]], 2([[GV]])
 
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
 
 ; Increment [[VA]] again.
 ; O32-DAG:       lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -117,12 +117,12 @@ entry:
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
 
-  call void asm sideeffect "# ANCHOR1", ""()
+  call void asm sideeffect "teqi $$zero, 1", ""()
   %arg1 = va_arg i8** %ap, i16
   %e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
   store volatile i16 %arg1, i16* %e1, align 2
 
-  call void asm sideeffect "# ANCHOR2", ""()
+  call void asm sideeffect "teqi $$zero, 2", ""()
   %arg2 = va_arg i8** %ap, i16
   %e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
   store volatile i16 %arg2, i16* %e2, align 2
@@ -173,7 +173,7 @@ entry:
 ; Store [[VA]]
 ; O32-DAG:       sw [[VA]], 0([[SP]])
 
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
 
 ; Increment [[VA]]
 ; O32-DAG:       lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -207,7 +207,7 @@ entry:
 
 ; ALL-DAG:       sw [[ARG1]], 4([[GV]])
 
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
 
 ; Increment [[VA]] again.
 ; O32-DAG:       lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -235,12 +235,12 @@ entry:
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
 
-  call void asm sideeffect "# ANCHOR1", ""()
+  call void asm sideeffect "teqi $$zero, 1", ""()
   %arg1 = va_arg i8** %ap, i32
   %e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1
   store volatile i32 %arg1, i32* %e1, align 4
 
-  call void asm sideeffect "# ANCHOR2", ""()
+  call void asm sideeffect "teqi $$zero, 2", ""()
   %arg2 = va_arg i8** %ap, i32
   %e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2
   store volatile i32 %arg2, i32* %e2, align 4
@@ -291,7 +291,7 @@ entry:
 ; Store [[VA]]
 ; O32-DAG:       sw [[VA]], 0([[SP]])
 
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
 
 ; Increment [[VA]] (and realign pointer for O32)
 ; O32:           lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -328,7 +328,7 @@ entry:
 ; NEW-DAG:       ld [[ARG1:\$[0-9]+]], 0([[VA]])
 ; NEW-DAG:       sd [[ARG1]], 8([[GV]])
 
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
 
 ; Increment [[VA]] again.
 ; FIXME: We're still aligned from the last one but CodeGen doesn't spot that.
@@ -362,12 +362,12 @@ entry:
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
 
-  call void asm sideeffect "# ANCHOR1", ""()
+  call void asm sideeffect "teqi $$zero, 1", ""()
   %arg1 = va_arg i8** %ap, i64
   %e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1
   store volatile i64 %arg1, i64* %e1, align 8
 
-  call void asm sideeffect "# ANCHOR2", ""()
+  call void asm sideeffect "teqi $$zero, 2", ""()
   %arg2 = va_arg i8** %ap, i64
   %e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2
   store volatile i64 %arg2, i64* %e2, align 8
@@ -418,7 +418,7 @@ entry:
 ; Store [[VA]]
 ; O32-DAG:       sw [[VA]], 0([[SP]])
 
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
 
 ; Increment [[VA]]
 ; O32-DAG:       lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -452,7 +452,7 @@ entry:
 
 ; ALL-DAG:       sh [[ARG1]], 2([[GV]])
 
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
 
 ; Increment [[VA]] again.
 ; O32-DAG:       lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -480,12 +480,12 @@ entry:
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
 
-  call void asm sideeffect "# ANCHOR1", ""()
+  call void asm sideeffect "teqi $$zero, 1", ""()
   %arg1 = va_arg i8** %ap, i16
   %e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
   store volatile i16 %arg1, i16* %e1, align 2
 
-  call void asm sideeffect "# ANCHOR2", ""()
+  call void asm sideeffect "teqi $$zero, 2", ""()
   %arg2 = va_arg i8** %ap, i16
   %e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
   store volatile i16 %arg2, i16* %e2, align 2
@@ -536,7 +536,7 @@ entry:
 ; Store [[VA]]
 ; O32-DAG:       sw [[VA]], 0([[SP]])
 
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
 
 ; Increment [[VA]]
 ; O32-DAG:       lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -570,7 +570,7 @@ entry:
 
 ; ALL-DAG:       sw [[ARG1]], 4([[GV]])
 
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
 
 ; Increment [[VA]] again.
 ; O32-DAG:       lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -598,12 +598,12 @@ entry:
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
 
-  call void asm sideeffect "# ANCHOR1", ""()
+  call void asm sideeffect "teqi $$zero, 1", ""()
   %arg1 = va_arg i8** %ap, i32
   %e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1
   store volatile i32 %arg1, i32* %e1, align 4
 
-  call void asm sideeffect "# ANCHOR2", ""()
+  call void asm sideeffect "teqi $$zero, 2", ""()
   %arg2 = va_arg i8** %ap, i32
   %e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2
   store volatile i32 %arg2, i32* %e2, align 4
@@ -654,7 +654,7 @@ entry:
 ; Store [[VA]]
 ; O32-DAG:       sw [[VA]], 0([[SP]])
 
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
 
 ; Increment [[VA]] (and realign pointer for O32)
 ; O32:           lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -691,7 +691,7 @@ entry:
 ; NEW-DAG:       ld [[ARG1:\$[0-9]+]], 0([[VA]])
 ; NEW-DAG:       sd [[ARG1]], 8([[GV]])
 
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
 
 ; Increment [[VA]] again.
 ; FIXME: We're still aligned from the last one but CodeGen doesn't spot that.
@@ -725,12 +725,12 @@ entry:
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
 
-  call void asm sideeffect "# ANCHOR1", ""()
+  call void asm sideeffect "teqi $$zero, 1", ""()
   %arg1 = va_arg i8** %ap, i64
   %e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1
   store volatile i64 %arg1, i64* %e1, align 8
 
-  call void asm sideeffect "# ANCHOR2", ""()
+  call void asm sideeffect "teqi $$zero, 2", ""()
   %arg2 = va_arg i8** %ap, i64
   %e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2
   store volatile i64 %arg2, i64* %e2, align 8
@@ -780,7 +780,7 @@ entry:
 ; Store [[VA]]
 ; O32-DAG:       sw [[VA]], 0([[SP]])
 
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
 
 ; Increment [[VA]]
 ; O32-DAG:       lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -814,7 +814,7 @@ entry:
 
 ; ALL-DAG:       sh [[ARG1]], 2([[GV]])
 
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
 
 ; Increment [[VA]] again.
 ; O32-DAG:       lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -842,12 +842,12 @@ entry:
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
 
-  call void asm sideeffect "# ANCHOR1", ""()
+  call void asm sideeffect "teqi $$zero, 1", ""()
   %arg1 = va_arg i8** %ap, i16
   %e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
   store volatile i16 %arg1, i16* %e1, align 2
 
-  call void asm sideeffect "# ANCHOR2", ""()
+  call void asm sideeffect "teqi $$zero, 2", ""()
   %arg2 = va_arg i8** %ap, i16
   %e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
   store volatile i16 %arg2, i16* %e2, align 2
@@ -897,7 +897,7 @@ entry:
 ; Store [[VA]]
 ; O32-DAG:       sw [[VA]], 0([[SP]])
 
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
 
 ; Increment [[VA]]
 ; O32-DAG:       lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -931,7 +931,7 @@ entry:
 
 ; ALL-DAG:       sw [[ARG1]], 4([[GV]])
 
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
 
 ; Increment [[VA]] again.
 ; O32-DAG:       lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -959,12 +959,12 @@ entry:
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
 
-  call void asm sideeffect "# ANCHOR1", ""()
+  call void asm sideeffect "teqi $$zero, 1", ""()
   %arg1 = va_arg i8** %ap, i32
   %e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1
   store volatile i32 %arg1, i32* %e1, align 4
 
-  call void asm sideeffect "# ANCHOR2", ""()
+  call void asm sideeffect "teqi $$zero, 2", ""()
   %arg2 = va_arg i8** %ap, i32
   %e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2
   store volatile i32 %arg2, i32* %e2, align 4
@@ -1014,7 +1014,7 @@ entry:
 ; Store [[VA]]
 ; O32-DAG:       sw [[VA]], 0([[SP]])
 
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
 
 ; Increment [[VA]] (and realign pointer for O32)
 ; O32:           lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -1051,7 +1051,7 @@ entry:
 ; NEW-DAG:       ld [[ARG1:\$[0-9]+]], 0([[VA]])
 ; NEW-DAG:       sd [[ARG1]], 8([[GV]])
 
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
 
 ; Increment [[VA]] again.
 ; FIXME: We're still aligned from the last one but CodeGen doesn't spot that.
@@ -1085,12 +1085,12 @@ entry:
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
 
-  call void asm sideeffect "# ANCHOR1", ""()
+  call void asm sideeffect "teqi $$zero, 1", ""()
   %arg1 = va_arg i8** %ap, i64
   %e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1
   store volatile i64 %arg1, i64* %e1, align 8
 
-  call void asm sideeffect "# ANCHOR2", ""()
+  call void asm sideeffect "teqi $$zero, 2", ""()
   %arg2 = va_arg i8** %ap, i64
   %e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2
   store volatile i64 %arg2, i64* %e2, align 8

Modified: llvm/trunk/test/CodeGen/Mips/no-odd-spreg-msa.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/no-odd-spreg-msa.ll?rev=254147&r1=254146&r2=254147&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/no-odd-spreg-msa.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/no-odd-spreg-msa.ll Thu Nov 26 04:26:18 2015
@@ -19,7 +19,7 @@ entry:
   ; On the other hand, if odd single precision registers are not permitted, it
   ; must copy $f13 to an even-numbered register before inserting into the
   ; vector.
-  call void asm sideeffect "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+  call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
   %1 = insertelement <4 x float> %0, float %b, i32 0
   store <4 x float> %1, <4 x float>* @v4f32
   ret void
@@ -32,7 +32,7 @@ entry:
 ; NOODDSPREG:     mov.s $f[[F0:[0-9]+]], $f13
 ; NOODDSPREG:     insve.w $w[[W0]][0], $w[[F0]][0]
 ; ODDSPREG:       insve.w $w[[W0]][0], $w13[0]
-; ALL:            # Clobber
+; ALL:            teqi $zero, 1
 ; ALL-NOT: sdc1
 ; ALL-NOT: ldc1
 ; ALL:            st.w $w[[W0]], 0($[[R0]])
@@ -53,7 +53,7 @@ entry:
   ; On the other hand, if odd single precision registers are not permitted, it
   ; must copy $f13 to an even-numbered register before inserting into the
   ; vector.
-  call void asm sideeffect "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+  call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
   %1 = insertelement <4 x float> %0, float %b, i32 1
   store <4 x float> %1, <4 x float>* @v4f32
   ret void
@@ -66,7 +66,7 @@ entry:
 ; NOODDSPREG:     mov.s $f[[F0:[0-9]+]], $f13
 ; NOODDSPREG:     insve.w $w[[W0]][1], $w[[F0]][0]
 ; ODDSPREG:       insve.w $w[[W0]][1], $w13[0]
-; ALL:            # Clobber
+; ALL:            teqi $zero, 1
 ; ALL-NOT: sdc1
 ; ALL-NOT: ldc1
 ; ALL:            st.w $w[[W0]], 0($[[R0]])
@@ -83,7 +83,7 @@ entry:
   ;
   ; On the other hand, if odd single precision registers are not permitted, it
   ; must move it to $f12/$w12.
-  call void asm sideeffect "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+  call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
 
   %2 = extractelement <4 x float> %1, i32 0
   ret float %2
@@ -94,7 +94,7 @@ entry:
 ; ALL:            ld.w $w12, 0($[[R0]])
 ; ALL:            move.v $w[[W0:13]], $w12
 ; NOODDSPREG:     move.v $w[[W0:12]], $w13
-; ALL:            # Clobber
+; ALL:            teqi $zero, 1
 ; ALL-NOT: st.w
 ; ALL-NOT: ld.w
 ; ALL:            mov.s $f0, $f[[W0]]
@@ -111,7 +111,7 @@ entry:
   ;
   ; On the other hand, if odd single precision registers are not permitted, it
   ; must be spilled.
-  call void asm sideeffect "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f12},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+  call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f12},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
 
   %2 = extractelement <4 x float> %1, i32 1
   ret float %2
@@ -124,7 +124,7 @@ entry:
 ; NOODDSPREG:     st.w $w[[W0]], 0($sp)
 ; ODDSPREG-NOT: st.w
 ; ODDSPREG-NOT: ld.w
-; ALL:            # Clobber
+; ALL:            teqi $zero, 1
 ; ODDSPREG-NOT: st.w
 ; ODDSPREG-NOT: ld.w
 ; NOODDSPREG:     ld.w $w0, 0($sp)



