[llvm] r368541 - [X86] Remove redundant ';' chars ending IR lines in lit tests. NFC
Bjorn Pettersson via llvm-commits
llvm-commits at lists.llvm.org
Sun Aug 11 12:27:14 PDT 2019
Author: bjope
Date: Sun Aug 11 12:27:14 2019
New Revision: 368541
URL: http://llvm.org/viewvc/llvm-project?rev=368541&view=rev
Log:
[X86] Remove redundant ';' chars ending IR lines in lit tests. NFC
Reviewers: RKSimon, craig.topper
Reviewed By: craig.topper
Subscribers: llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D66053
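For reference, the change amounts to dropping a lone trailing ';' from IR statement lines while leaving lines that begin with ';' (IR comments and FileCheck directives) untouched, as the diffs below show. The following is only a minimal, hypothetical sketch of such a cleanup pass over a test file (the function name and script are illustrative assumptions, not the tooling actually used for this commit):

#!/usr/bin/env python3
"""Illustrative sketch: strip a redundant trailing ';' from IR statement
lines in a lit test, leaving comment/FileCheck lines (which start with ';')
as they are. Not the author's actual tooling."""

import re
import sys

def strip_trailing_semicolons(text: str) -> str:
    cleaned = []
    for line in text.splitlines():
        if line.lstrip().startswith(';'):
            # IR comment / FileCheck directive line: keep verbatim.
            cleaned.append(line)
        else:
            # Drop a single trailing ';' (and trailing whitespace) from code lines.
            cleaned.append(re.sub(r';\s*$', '', line.rstrip()))
    return '\n'.join(cleaned) + '\n'

if __name__ == '__main__':
    for path in sys.argv[1:]:
        with open(path) as f:
            text = f.read()
        with open(path, 'w') as f:
            f.write(strip_trailing_semicolons(text))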
Modified:
llvm/trunk/test/CodeGen/X86/smul_fix.ll
llvm/trunk/test/CodeGen/X86/smul_fix_sat.ll
llvm/trunk/test/CodeGen/X86/smul_fix_sat_constants.ll
llvm/trunk/test/CodeGen/X86/ssub_sat.ll
llvm/trunk/test/CodeGen/X86/uadd_sat.ll
llvm/trunk/test/CodeGen/X86/umul_fix.ll
llvm/trunk/test/CodeGen/X86/usub_sat.ll
Modified: llvm/trunk/test/CodeGen/X86/smul_fix.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/smul_fix.ll?rev=368541&r1=368540&r2=368541&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/smul_fix.ll (original)
+++ llvm/trunk/test/CodeGen/X86/smul_fix.ll Sun Aug 11 12:27:14 2019
@@ -25,8 +25,8 @@ define i32 @func(i32 %x, i32 %y) nounwin
; X86-NEXT: imull {{[0-9]+}}(%esp)
; X86-NEXT: shrdl $2, %edx, %eax
; X86-NEXT: retl
- %tmp = call i32 @llvm.smul.fix.i32(i32 %x, i32 %y, i32 2);
- ret i32 %tmp;
+ %tmp = call i32 @llvm.smul.fix.i32(i32 %x, i32 %y, i32 2)
+ ret i32 %tmp
}
define i64 @func2(i64 %x, i64 %y) {
@@ -90,8 +90,8 @@ define i64 @func2(i64 %x, i64 %y) {
; X86-NEXT: popl %ebp
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
- %tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 2);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 2)
+ ret i64 %tmp
}
define i4 @func3(i4 %x, i4 %y) nounwind {
@@ -128,8 +128,8 @@ define i4 @func3(i4 %x, i4 %y) nounwind
; X86-NEXT: orb %ah, %al
; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl
- %tmp = call i4 @llvm.smul.fix.i4(i4 %x, i4 %y, i32 2);
- ret i4 %tmp;
+ %tmp = call i4 @llvm.smul.fix.i4(i4 %x, i4 %y, i32 2)
+ ret i4 %tmp
}
define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
@@ -193,8 +193,8 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
- %tmp = call <4 x i32> @llvm.smul.fix.v4i32(<4 x i32> %x, <4 x i32> %y, i32 2);
- ret <4 x i32> %tmp;
+ %tmp = call <4 x i32> @llvm.smul.fix.v4i32(<4 x i32> %x, <4 x i32> %y, i32 2)
+ ret <4 x i32> %tmp
}
; These result in regular integer multiplication
@@ -210,8 +210,8 @@ define i32 @func4(i32 %x, i32 %y) nounwi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: imull {{[0-9]+}}(%esp), %eax
; X86-NEXT: retl
- %tmp = call i32 @llvm.smul.fix.i32(i32 %x, i32 %y, i32 0);
- ret i32 %tmp;
+ %tmp = call i32 @llvm.smul.fix.i32(i32 %x, i32 %y, i32 0)
+ ret i32 %tmp
}
define i64 @func5(i64 %x, i64 %y) {
@@ -237,8 +237,8 @@ define i64 @func5(i64 %x, i64 %y) {
; X86-NEXT: popl %esi
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
- %tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 0);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 0)
+ ret i64 %tmp
}
define i4 @func6(i4 %x, i4 %y) nounwind {
@@ -263,8 +263,8 @@ define i4 @func6(i4 %x, i4 %y) nounwind
; X86-NEXT: sarb $4, %cl
; X86-NEXT: mulb %cl
; X86-NEXT: retl
- %tmp = call i4 @llvm.smul.fix.i4(i4 %x, i4 %y, i32 0);
- ret i4 %tmp;
+ %tmp = call i4 @llvm.smul.fix.i4(i4 %x, i4 %y, i32 0)
+ ret i4 %tmp
}
define <4 x i32> @vec2(<4 x i32> %x, <4 x i32> %y) nounwind {
@@ -299,8 +299,8 @@ define <4 x i32> @vec2(<4 x i32> %x, <4
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: retl $4
- %tmp = call <4 x i32> @llvm.smul.fix.v4i32(<4 x i32> %x, <4 x i32> %y, i32 0);
- ret <4 x i32> %tmp;
+ %tmp = call <4 x i32> @llvm.smul.fix.v4i32(<4 x i32> %x, <4 x i32> %y, i32 0)
+ ret <4 x i32> %tmp
}
define i64 @func7(i64 %x, i64 %y) nounwind {
@@ -348,8 +348,8 @@ define i64 @func7(i64 %x, i64 %y) nounwi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
- %tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 32);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 32)
+ ret i64 %tmp
}
define i64 @func8(i64 %x, i64 %y) nounwind {
@@ -409,6 +409,6 @@ define i64 @func8(i64 %x, i64 %y) nounwi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
- %tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 63);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.smul.fix.i64(i64 %x, i64 %y, i32 63)
+ ret i64 %tmp
}
Modified: llvm/trunk/test/CodeGen/X86/smul_fix_sat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/smul_fix_sat.ll?rev=368541&r1=368540&r2=368541&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/smul_fix_sat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/smul_fix_sat.ll Sun Aug 11 12:27:14 2019
@@ -36,8 +36,8 @@ define i32 @func(i32 %x, i32 %y) nounwin
; X86-NEXT: movl $-2147483648, %ecx # imm = 0x80000000
; X86-NEXT: cmovll %ecx, %eax
; X86-NEXT: retl
- %tmp = call i32 @llvm.smul.fix.sat.i32(i32 %x, i32 %y, i32 2);
- ret i32 %tmp;
+ %tmp = call i32 @llvm.smul.fix.sat.i32(i32 %x, i32 %y, i32 2)
+ ret i32 %tmp
}
define i64 @func2(i64 %x, i64 %y) nounwind {
@@ -132,8 +132,8 @@ define i64 @func2(i64 %x, i64 %y) nounwi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
- %tmp = call i64 @llvm.smul.fix.sat.i64(i64 %x, i64 %y, i32 2);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.smul.fix.sat.i64(i64 %x, i64 %y, i32 2)
+ ret i64 %tmp
}
define i4 @func3(i4 %x, i4 %y) nounwind {
@@ -186,8 +186,8 @@ define i4 @func3(i4 %x, i4 %y) nounwind
; X86-NEXT: sarb $4, %al
; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl
- %tmp = call i4 @llvm.smul.fix.sat.i4(i4 %x, i4 %y, i32 2);
- ret i4 %tmp;
+ %tmp = call i4 @llvm.smul.fix.sat.i4(i4 %x, i4 %y, i32 2)
+ ret i4 %tmp
}
define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
@@ -311,8 +311,8 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
- %tmp = call <4 x i32> @llvm.smul.fix.sat.v4i32(<4 x i32> %x, <4 x i32> %y, i32 2);
- ret <4 x i32> %tmp;
+ %tmp = call <4 x i32> @llvm.smul.fix.sat.v4i32(<4 x i32> %x, <4 x i32> %y, i32 2)
+ ret <4 x i32> %tmp
}
; These result in regular integer multiplication
@@ -344,8 +344,8 @@ define i32 @func4(i32 %x, i32 %y) nounwi
; X86-NEXT: cmovol %ecx, %eax
; X86-NEXT: popl %esi
; X86-NEXT: retl
- %tmp = call i32 @llvm.smul.fix.sat.i32(i32 %x, i32 %y, i32 0);
- ret i32 %tmp;
+ %tmp = call i32 @llvm.smul.fix.sat.i32(i32 %x, i32 %y, i32 0)
+ ret i32 %tmp
}
define i64 @func5(i64 %x, i64 %y) {
@@ -407,8 +407,8 @@ define i64 @func5(i64 %x, i64 %y) {
; X86-NEXT: popl %edi
; X86-NEXT: .cfi_def_cfa_offset 4
; X86-NEXT: retl
- %tmp = call i64 @llvm.smul.fix.sat.i64(i64 %x, i64 %y, i32 0);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.smul.fix.sat.i64(i64 %x, i64 %y, i32 0)
+ ret i64 %tmp
}
define i4 @func6(i4 %x, i4 %y) nounwind {
@@ -451,8 +451,8 @@ define i4 @func6(i4 %x, i4 %y) nounwind
; X86-NEXT: sarb $4, %al
; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl
- %tmp = call i4 @llvm.smul.fix.sat.i4(i4 %x, i4 %y, i32 0);
- ret i4 %tmp;
+ %tmp = call i4 @llvm.smul.fix.sat.i4(i4 %x, i4 %y, i32 0)
+ ret i4 %tmp
}
define <4 x i32> @vec2(<4 x i32> %x, <4 x i32> %y) nounwind {
@@ -569,8 +569,8 @@ define <4 x i32> @vec2(<4 x i32> %x, <4
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
- %tmp = call <4 x i32> @llvm.smul.fix.sat.v4i32(<4 x i32> %x, <4 x i32> %y, i32 0);
- ret <4 x i32> %tmp;
+ %tmp = call <4 x i32> @llvm.smul.fix.sat.v4i32(<4 x i32> %x, <4 x i32> %y, i32 0)
+ ret <4 x i32> %tmp
}
define i64 @func7(i64 %x, i64 %y) nounwind {
@@ -654,8 +654,8 @@ define i64 @func7(i64 %x, i64 %y) nounwi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
- %tmp = call i64 @llvm.smul.fix.sat.i64(i64 %x, i64 %y, i32 32);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.smul.fix.sat.i64(i64 %x, i64 %y, i32 32)
+ ret i64 %tmp
}
define i64 @func8(i64 %x, i64 %y) nounwind {
@@ -734,6 +734,6 @@ define i64 @func8(i64 %x, i64 %y) nounwi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
- %tmp = call i64 @llvm.smul.fix.sat.i64(i64 %x, i64 %y, i32 63);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.smul.fix.sat.i64(i64 %x, i64 %y, i32 63)
+ ret i64 %tmp
}
Modified: llvm/trunk/test/CodeGen/X86/smul_fix_sat_constants.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/smul_fix_sat_constants.ll?rev=368541&r1=368540&r2=368541&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/smul_fix_sat_constants.ll (original)
+++ llvm/trunk/test/CodeGen/X86/smul_fix_sat_constants.ll Sun Aug 11 12:27:14 2019
@@ -23,8 +23,8 @@ define i64 @func() nounwind {
; X64-NEXT: movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
; X64-NEXT: cmovlq %rcx, %rax
; X64-NEXT: retq
- %tmp = call i64 @llvm.smul.fix.sat.i64(i64 3, i64 2, i32 2);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.smul.fix.sat.i64(i64 3, i64 2, i32 2)
+ ret i64 %tmp
}
define i64 @func2() nounwind {
@@ -40,8 +40,8 @@ define i64 @func2() nounwind {
; X64-NEXT: imulq $2, %rax, %rax
; X64-NEXT: cmovoq %rcx, %rax
; X64-NEXT: retq
- %tmp = call i64 @llvm.smul.fix.sat.i64(i64 3, i64 2, i32 0);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.smul.fix.sat.i64(i64 3, i64 2, i32 0)
+ ret i64 %tmp
}
define i64 @func3() nounwind {
@@ -58,8 +58,8 @@ define i64 @func3() nounwind {
; X64-NEXT: movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
; X64-NEXT: cmovlq %rcx, %rax
; X64-NEXT: retq
- %tmp = call i64 @llvm.smul.fix.sat.i64(i64 9223372036854775807, i64 2, i32 2);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.smul.fix.sat.i64(i64 9223372036854775807, i64 2, i32 2)
+ ret i64 %tmp
}
define i64 @func4() nounwind {
@@ -76,8 +76,8 @@ define i64 @func4() nounwind {
; X64-NEXT: movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
; X64-NEXT: cmovlq %rcx, %rax
; X64-NEXT: retq
- %tmp = call i64 @llvm.smul.fix.sat.i64(i64 9223372036854775807, i64 2, i32 32);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.smul.fix.sat.i64(i64 9223372036854775807, i64 2, i32 32)
+ ret i64 %tmp
}
define i64 @func5() nounwind {
@@ -96,6 +96,6 @@ define i64 @func5() nounwind {
; X64-NEXT: movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
; X64-NEXT: cmovlq %rcx, %rax
; X64-NEXT: retq
- %tmp = call i64 @llvm.smul.fix.sat.i64(i64 9223372036854775807, i64 2, i32 63);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.smul.fix.sat.i64(i64 9223372036854775807, i64 2, i32 63)
+ ret i64 %tmp
}
Modified: llvm/trunk/test/CodeGen/X86/ssub_sat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/ssub_sat.ll?rev=368541&r1=368540&r2=368541&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/ssub_sat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/ssub_sat.ll Sun Aug 11 12:27:14 2019
@@ -33,8 +33,8 @@ define i32 @func(i32 %x, i32 %y) nounwin
; X64-NEXT: subl %esi, %edi
; X64-NEXT: cmovnol %edi, %eax
; X64-NEXT: retq
- %tmp = call i32 @llvm.ssub.sat.i32(i32 %x, i32 %y);
- ret i32 %tmp;
+ %tmp = call i32 @llvm.ssub.sat.i32(i32 %x, i32 %y)
+ ret i32 %tmp
}
define i64 @func2(i64 %x, i64 %y) nounwind {
@@ -85,8 +85,8 @@ define i64 @func2(i64 %x, i64 %y) nounwi
; X64-NEXT: subq %rsi, %rdi
; X64-NEXT: cmovnoq %rdi, %rax
; X64-NEXT: retq
- %tmp = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %y);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %y)
+ ret i64 %tmp
}
define i4 @func3(i4 %x, i4 %y) nounwind {
@@ -123,8 +123,8 @@ define i4 @func3(i4 %x, i4 %y) nounwind
; X64-NEXT: sarb $4, %al
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
- %tmp = call i4 @llvm.ssub.sat.i4(i4 %x, i4 %y);
- ret i4 %tmp;
+ %tmp = call i4 @llvm.ssub.sat.i4(i4 %x, i4 %y)
+ ret i4 %tmp
}
define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
@@ -208,6 +208,6 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x
; X64-NEXT: pandn %xmm2, %xmm0
; X64-NEXT: por %xmm3, %xmm0
; X64-NEXT: retq
- %tmp = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %x, <4 x i32> %y);
- ret <4 x i32> %tmp;
+ %tmp = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ ret <4 x i32> %tmp
}
Modified: llvm/trunk/test/CodeGen/X86/uadd_sat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/uadd_sat.ll?rev=368541&r1=368540&r2=368541&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/uadd_sat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/uadd_sat.ll Sun Aug 11 12:27:14 2019
@@ -22,8 +22,8 @@ define i32 @func(i32 %x, i32 %y) nounwin
; X64-NEXT: movl $-1, %eax
; X64-NEXT: cmovael %edi, %eax
; X64-NEXT: retq
- %tmp = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y);
- ret i32 %tmp;
+ %tmp = call i32 @llvm.uadd.sat.i32(i32 %x, i32 %y)
+ ret i32 %tmp
}
define i64 @func2(i64 %x, i64 %y) nounwind {
@@ -44,8 +44,8 @@ define i64 @func2(i64 %x, i64 %y) nounwi
; X64-NEXT: movq $-1, %rax
; X64-NEXT: cmovaeq %rdi, %rax
; X64-NEXT: retq
- %tmp = call i64 @llvm.uadd.sat.i64(i64 %x, i64 %y);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.uadd.sat.i64(i64 %x, i64 %y)
+ ret i64 %tmp
}
define i4 @func3(i4 %x, i4 %y) nounwind {
@@ -74,8 +74,8 @@ define i4 @func3(i4 %x, i4 %y) nounwind
; X64-NEXT: shrb $4, %al
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
- %tmp = call i4 @llvm.uadd.sat.i4(i4 %x, i4 %y);
- ret i4 %tmp;
+ %tmp = call i4 @llvm.uadd.sat.i4(i4 %x, i4 %y)
+ ret i4 %tmp
}
define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
@@ -116,6 +116,6 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x
; X64-NEXT: pcmpgtd %xmm2, %xmm0
; X64-NEXT: por %xmm1, %xmm0
; X64-NEXT: retq
- %tmp = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y);
- ret <4 x i32> %tmp;
+ %tmp = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ ret <4 x i32> %tmp
}
Modified: llvm/trunk/test/CodeGen/X86/umul_fix.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/umul_fix.ll?rev=368541&r1=368540&r2=368541&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/umul_fix.ll (original)
+++ llvm/trunk/test/CodeGen/X86/umul_fix.ll Sun Aug 11 12:27:14 2019
@@ -25,8 +25,8 @@ define i32 @func(i32 %x, i32 %y) nounwin
; X86-NEXT: mull {{[0-9]+}}(%esp)
; X86-NEXT: shrdl $2, %edx, %eax
; X86-NEXT: retl
- %tmp = call i32 @llvm.umul.fix.i32(i32 %x, i32 %y, i32 2);
- ret i32 %tmp;
+ %tmp = call i32 @llvm.umul.fix.i32(i32 %x, i32 %y, i32 2)
+ ret i32 %tmp
}
define i64 @func2(i64 %x, i64 %y) nounwind {
@@ -68,8 +68,8 @@ define i64 @func2(i64 %x, i64 %y) nounwi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
- %tmp = call i64 @llvm.umul.fix.i64(i64 %x, i64 %y, i32 2);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.umul.fix.i64(i64 %x, i64 %y, i32 2)
+ ret i64 %tmp
}
define i4 @func3(i4 %x, i4 %y) nounwind {
@@ -95,8 +95,8 @@ define i4 @func3(i4 %x, i4 %y) nounwind
; X86-NEXT: shrb $2, %al
; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl
- %tmp = call i4 @llvm.umul.fix.i4(i4 %x, i4 %y, i32 2);
- ret i4 %tmp;
+ %tmp = call i4 @llvm.umul.fix.i4(i4 %x, i4 %y, i32 2)
+ ret i4 %tmp
}
define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
@@ -152,8 +152,8 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl $4
- %tmp = call <4 x i32> @llvm.umul.fix.v4i32(<4 x i32> %x, <4 x i32> %y, i32 2);
- ret <4 x i32> %tmp;
+ %tmp = call <4 x i32> @llvm.umul.fix.v4i32(<4 x i32> %x, <4 x i32> %y, i32 2)
+ ret <4 x i32> %tmp
}
; These result in regular integer multiplication
@@ -169,8 +169,8 @@ define i32 @func4(i32 %x, i32 %y) nounwi
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: imull {{[0-9]+}}(%esp), %eax
; X86-NEXT: retl
- %tmp = call i32 @llvm.umul.fix.i32(i32 %x, i32 %y, i32 0);
- ret i32 %tmp;
+ %tmp = call i32 @llvm.umul.fix.i32(i32 %x, i32 %y, i32 0)
+ ret i32 %tmp
}
define i64 @func5(i64 %x, i64 %y) nounwind {
@@ -193,8 +193,8 @@ define i64 @func5(i64 %x, i64 %y) nounwi
; X86-NEXT: addl %esi, %edx
; X86-NEXT: popl %esi
; X86-NEXT: retl
- %tmp = call i64 @llvm.umul.fix.i64(i64 %x, i64 %y, i32 0);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.umul.fix.i64(i64 %x, i64 %y, i32 0)
+ ret i64 %tmp
}
define i4 @func6(i4 %x, i4 %y) nounwind {
@@ -215,8 +215,8 @@ define i4 @func6(i4 %x, i4 %y) nounwind
; X86-NEXT: andb $15, %cl
; X86-NEXT: mulb %cl
; X86-NEXT: retl
- %tmp = call i4 @llvm.umul.fix.i4(i4 %x, i4 %y, i32 0);
- ret i4 %tmp;
+ %tmp = call i4 @llvm.umul.fix.i4(i4 %x, i4 %y, i32 0)
+ ret i4 %tmp
}
define <4 x i32> @vec2(<4 x i32> %x, <4 x i32> %y) nounwind {
@@ -251,8 +251,8 @@ define <4 x i32> @vec2(<4 x i32> %x, <4
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: retl $4
- %tmp = call <4 x i32> @llvm.umul.fix.v4i32(<4 x i32> %x, <4 x i32> %y, i32 0);
- ret <4 x i32> %tmp;
+ %tmp = call <4 x i32> @llvm.umul.fix.v4i32(<4 x i32> %x, <4 x i32> %y, i32 0)
+ ret <4 x i32> %tmp
}
define i64 @func7(i64 %x, i64 %y) nounwind {
@@ -291,8 +291,8 @@ define i64 @func7(i64 %x, i64 %y) nounwi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
- %tmp = call i64 @llvm.umul.fix.i64(i64 %x, i64 %y, i32 32);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.umul.fix.i64(i64 %x, i64 %y, i32 32)
+ ret i64 %tmp
}
define i64 @func8(i64 %x, i64 %y) nounwind {
@@ -340,8 +340,8 @@ define i64 @func8(i64 %x, i64 %y) nounwi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
- %tmp = call i64 @llvm.umul.fix.i64(i64 %x, i64 %y, i32 63);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.umul.fix.i64(i64 %x, i64 %y, i32 63)
+ ret i64 %tmp
}
define i64 @func9(i64 %x, i64 %y) nounwind {
@@ -388,6 +388,6 @@ define i64 @func9(i64 %x, i64 %y) nounwi
; X86-NEXT: popl %ebx
; X86-NEXT: popl %ebp
; X86-NEXT: retl
- %tmp = call i64 @llvm.umul.fix.i64(i64 %x, i64 %y, i32 64);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.umul.fix.i64(i64 %x, i64 %y, i32 64)
+ ret i64 %tmp
}
Modified: llvm/trunk/test/CodeGen/X86/usub_sat.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/usub_sat.ll?rev=368541&r1=368540&r2=368541&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/usub_sat.ll (original)
+++ llvm/trunk/test/CodeGen/X86/usub_sat.ll Sun Aug 11 12:27:14 2019
@@ -22,8 +22,8 @@ define i32 @func(i32 %x, i32 %y) nounwin
; X64-NEXT: subl %esi, %edi
; X64-NEXT: cmovael %edi, %eax
; X64-NEXT: retq
- %tmp = call i32 @llvm.usub.sat.i32(i32 %x, i32 %y);
- ret i32 %tmp;
+ %tmp = call i32 @llvm.usub.sat.i32(i32 %x, i32 %y)
+ ret i32 %tmp
}
define i64 @func2(i64 %x, i64 %y) nounwind {
@@ -44,8 +44,8 @@ define i64 @func2(i64 %x, i64 %y) nounwi
; X64-NEXT: subq %rsi, %rdi
; X64-NEXT: cmovaeq %rdi, %rax
; X64-NEXT: retq
- %tmp = call i64 @llvm.usub.sat.i64(i64 %x, i64 %y);
- ret i64 %tmp;
+ %tmp = call i64 @llvm.usub.sat.i64(i64 %x, i64 %y)
+ ret i64 %tmp
}
define i4 @func3(i4 %x, i4 %y) nounwind {
@@ -74,8 +74,8 @@ define i4 @func3(i4 %x, i4 %y) nounwind
; X64-NEXT: shrb $4, %al
; X64-NEXT: # kill: def $al killed $al killed $eax
; X64-NEXT: retq
- %tmp = call i4 @llvm.usub.sat.i4(i4 %x, i4 %y);
- ret i4 %tmp;
+ %tmp = call i4 @llvm.usub.sat.i4(i4 %x, i4 %y)
+ ret i4 %tmp
}
define <4 x i32> @vec(<4 x i32> %x, <4 x i32> %y) nounwind {
@@ -117,6 +117,6 @@ define <4 x i32> @vec(<4 x i32> %x, <4 x
; X64-NEXT: psubd %xmm1, %xmm0
; X64-NEXT: pand %xmm2, %xmm0
; X64-NEXT: retq
- %tmp = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %y);
- ret <4 x i32> %tmp;
+ %tmp = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
+ ret <4 x i32> %tmp
}