[llvm] fe7bc71 - [X86] Add abs(sext_inreg(x)) test coverage for Issue #43370
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Sun Oct 23 10:17:25 PDT 2022
Author: Simon Pilgrim
Date: 2022-10-23T18:17:19+01:00
New Revision: fe7bc7153aca60285b7073af3adcab02b8270317
URL: https://github.com/llvm/llvm-project/commit/fe7bc7153aca60285b7073af3adcab02b8270317
DIFF: https://github.com/llvm/llvm-project/commit/fe7bc7153aca60285b7073af3adcab02b8270317.diff
LOG: [X86] Add abs(sext_inreg(x)) test coverage for Issue #43370
Added:
Modified:
llvm/test/CodeGen/X86/abs.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/X86/abs.ll b/llvm/test/CodeGen/X86/abs.ll
index 80a4eb6ccba1..e01c139101d4 100644
--- a/llvm/test/CodeGen/X86/abs.ll
+++ b/llvm/test/CodeGen/X86/abs.ll
@@ -611,3 +611,116 @@ define <16 x i8> @test_v16i8(<16 x i8> %a) nounwind {
%r = call <16 x i8> @llvm.abs.v16i8(<16 x i8> %a, i1 false)
ret <16 x i8> %r
}
+
+define i16 @test_sextinreg_i16(i16 %a) nounwind {
+; X64-LABEL: test_sextinreg_i16:
+; X64: # %bb.0:
+; X64-NEXT: movsbl %dil, %ecx
+; X64-NEXT: movl %ecx, %eax
+; X64-NEXT: negw %ax
+; X64-NEXT: cmovsw %cx, %ax
+; X64-NEXT: retq
+;
+; X86-LABEL: test_sextinreg_i16:
+; X86: # %bb.0:
+; X86-NEXT: movsbl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: negw %ax
+; X86-NEXT: cmovsw %cx, %ax
+; X86-NEXT: retl
+  %shl = shl i16 %a, 8
+  %ashr = ashr exact i16 %shl, 8
+  %res = call i16 @llvm.abs.i16(i16 %ashr, i1 true)
+  ret i16 %res
+}
+
+define i32 @test_sextinreg_i32(i32 %a) nounwind {
+; X64-LABEL: test_sextinreg_i32:
+; X64: # %bb.0:
+; X64-NEXT: movswl %di, %ecx
+; X64-NEXT: movl %ecx, %eax
+; X64-NEXT: negl %eax
+; X64-NEXT: cmovsl %ecx, %eax
+; X64-NEXT: retq
+;
+; X86-LABEL: test_sextinreg_i32:
+; X86: # %bb.0:
+; X86-NEXT: movswl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %eax
+; X86-NEXT: negl %eax
+; X86-NEXT: cmovsl %ecx, %eax
+; X86-NEXT: retl
+  %shl = shl i32 %a, 16
+  %ashr = ashr exact i32 %shl, 16
+  %res = call i32 @llvm.abs.i32(i32 %ashr, i1 true)
+  ret i32 %res
+}
+
+define i64 @test_sextinreg_i64(i64 %a) nounwind {
+; X64-LABEL: test_sextinreg_i64:
+; X64: # %bb.0:
+; X64-NEXT: movslq %edi, %rcx
+; X64-NEXT: movq %rcx, %rax
+; X64-NEXT: negq %rax
+; X64-NEXT: cmovsq %rcx, %rax
+; X64-NEXT: retq
+;
+; X86-LABEL: test_sextinreg_i64:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl %eax, %ecx
+; X86-NEXT: sarl $31, %ecx
+; X86-NEXT: xorl %ecx, %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: sbbl %ecx, %edx
+; X86-NEXT: retl
+  %shl = shl i64 %a, 32
+  %ashr = ashr exact i64 %shl, 32
+  %res = call i64 @llvm.abs.i64(i64 %ashr, i1 true)
+  ret i64 %res
+}
+
+define i128 @test_sextinreg_i128(i128 %a) nounwind {
+; X64-LABEL: test_sextinreg_i128:
+; X64: # %bb.0:
+; X64-NEXT: movq %rdi, %rax
+; X64-NEXT: movq %rdi, %rcx
+; X64-NEXT: sarq $63, %rcx
+; X64-NEXT: xorq %rcx, %rax
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: subq %rcx, %rax
+; X64-NEXT: sbbq %rcx, %rdx
+; X64-NEXT: retq
+;
+; X86-LABEL: test_sextinreg_i128:
+; X86: # %bb.0:
+; X86-NEXT: pushl %ebx
+; X86-NEXT: pushl %edi
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl %ecx, %edx
+; X86-NEXT: sarl $31, %edx
+; X86-NEXT: xorl %edx, %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: xorl %edx, %esi
+; X86-NEXT: xorl %edi, %edi
+; X86-NEXT: subl %edx, %esi
+; X86-NEXT: sbbl %edx, %ecx
+; X86-NEXT: movl $0, %ebx
+; X86-NEXT: sbbl %edx, %ebx
+; X86-NEXT: sbbl %edx, %edi
+; X86-NEXT: movl %esi, (%eax)
+; X86-NEXT: movl %ecx, 4(%eax)
+; X86-NEXT: movl %ebx, 8(%eax)
+; X86-NEXT: movl %edi, 12(%eax)
+; X86-NEXT: popl %esi
+; X86-NEXT: popl %edi
+; X86-NEXT: popl %ebx
+; X86-NEXT: retl $4
+  %shl = shl i128 %a, 64
+  %ashr = ashr exact i128 %shl, 64
+  %res = call i128 @llvm.abs.i128(i128 %ashr, i1 true)
+  ret i128 %res
+}
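
For context, the shl + ashr-exact pairs in the new tests are the IR idiom for sext_inreg: the value is sign-extended in place from a narrower width before llvm.abs is applied. At the source level the i32 test corresponds roughly to the C sketch below (illustrative only; the function name abs_of_sext16 is made up here and is not part of the commit):

  #include <stdlib.h>

  /* Roughly the pattern exercised by test_sextinreg_i32: the low 16 bits
   * of the argument are sign-extended in register (sext_inreg) and the
   * absolute value of the result is taken. */
  int abs_of_sext16(int a) {
    short low = (short)a;   /* truncate to i16, then sign-extend back to i32 */
    return abs((int)low);   /* abs(sext_inreg(a, i16)) */
  }

The CHECK lines follow the usual autogenerated layout for X86 codegen tests, presumably produced with llvm/utils/update_llc_test_checks.py.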