[llvm] r334404 - [x86] add scalar cvtt intrinsic tests; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 11 06:51:34 PDT 2018


Author: spatel
Date: Mon Jun 11 06:51:34 2018
New Revision: 334404

URL: http://llvm.org/viewvc/llvm-project?rev=334404&view=rev
Log:
[x86] add scalar cvtt intrinsic tests; NFC

More coverage for the problem noted in D47993 (although these shouldn't be affected by that patch).
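
As background (a minimal sketch, not part of this commit), the out-of-range behavior that prevents folding these float->int->float round trips into a truncating 'round' instruction can be illustrated with the corresponding C intrinsics. The printed result assumes x86-64 codegen where the conversions lower to cvttss2si/cvtsi2ss, so an out-of-range input produces the x86 "integer indefinite" value (INT_MIN) rather than a truncated copy of the input:

    #include <immintrin.h>
    #include <stdio.h>

    /* Sketch: for inputs outside the i32 range, cvttss2si returns
     * 0x80000000 (INT_MIN), so converting back to float yields
     * -2147483648.0. A round-toward-zero FP instruction would instead
     * return the input (1e20f) unchanged, hence the two sequences are
     * not interchangeable for out-of-range values. */
    int main(void) {
        __m128 v = _mm_set_ss(1.0e20f);   /* far outside the i32 range */
        int i = _mm_cvttss_si32(v);       /* cvttss2si -> 0x80000000   */
        float back = (float)i;            /* cvtsi2ss                  */
        printf("input=%e  round-trip=%e\n", 1.0e20f, (double)back);
        return 0;
    }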

Modified:
    llvm/trunk/test/CodeGen/X86/avx-cvttp2si.ll
    llvm/trunk/test/CodeGen/X86/sse-cvttp2si.ll

Modified: llvm/trunk/test/CodeGen/X86/avx-cvttp2si.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx-cvttp2si.ll?rev=334404&r1=334403&r2=334404&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx-cvttp2si.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx-cvttp2si.ll Mon Jun 11 06:51:34 2018
@@ -2,7 +2,7 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=avx              | FileCheck %s --check-prefixes=AVX,AVX1
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=avx512f,avx512vl | FileCheck %s --check-prefixes=AVX,AVX512
 
-; PR37551 - https://bugs.llvm.org/show_bug.cgi?id=37751
+; PR37751 - https://bugs.llvm.org/show_bug.cgi?id=37751
 ; We can't combine into 'round' instructions because the behavior is different for out-of-range values.
 
 declare <8 x i32> @llvm.x86.avx.cvtt.ps2dq.256(<8 x float>)

Modified: llvm/trunk/test/CodeGen/X86/sse-cvttp2si.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/sse-cvttp2si.ll?rev=334404&r1=334403&r2=334404&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/sse-cvttp2si.ll (original)
+++ llvm/trunk/test/CodeGen/X86/sse-cvttp2si.ll Mon Jun 11 06:51:34 2018
@@ -3,11 +3,159 @@
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=avx              | FileCheck %s --check-prefixes=AVX,AVX1
 ; RUN: llc < %s -mtriple=x86_64-- -mattr=avx512f,avx512vl | FileCheck %s --check-prefixes=AVX,AVX512
 
-; PR37551 - https://bugs.llvm.org/show_bug.cgi?id=37751
+; PR37751 - https://bugs.llvm.org/show_bug.cgi?id=37751
 ; We can't combine into 'round' instructions because the behavior is different for out-of-range values.
 
 declare <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float>)
 declare <4 x i32> @llvm.x86.sse2.cvttpd2dq(<2 x double>)
+declare i32 @llvm.x86.sse.cvttss2si(<4 x float>)
+declare i64 @llvm.x86.sse.cvttss2si64(<4 x float>)
+declare i32 @llvm.x86.sse2.cvttsd2si(<2 x double>)
+declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>)
+
+define float @float_to_int_to_float_mem_f32_i32(<4 x float>* %p) {
+; SSE-LABEL: float_to_int_to_float_mem_f32_i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttss2si (%rdi), %eax
+; SSE-NEXT:    cvtsi2ssl %eax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: float_to_int_to_float_mem_f32_i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttss2si (%rdi), %eax
+; AVX-NEXT:    vcvtsi2ssl %eax, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x = load <4 x float>, <4 x float>* %p, align 16
+  %fptosi = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %x)
+  %sitofp = sitofp i32 %fptosi to float
+  ret float %sitofp
+}
+
+define float @float_to_int_to_float_reg_f32_i32(<4 x float> %x) {
+; SSE-LABEL: float_to_int_to_float_reg_f32_i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttss2si %xmm0, %eax
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    cvtsi2ssl %eax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: float_to_int_to_float_reg_f32_i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttss2si %xmm0, %eax
+; AVX-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %fptosi = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %x)
+  %sitofp = sitofp i32 %fptosi to float
+  ret float %sitofp
+}
+
+define float @float_to_int_to_float_mem_f32_i64(<4 x float>* %p) {
+; SSE-LABEL: float_to_int_to_float_mem_f32_i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttss2si (%rdi), %rax
+; SSE-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: float_to_int_to_float_mem_f32_i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttss2si (%rdi), %rax
+; AVX-NEXT:    vcvtsi2ssq %rax, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x = load <4 x float>, <4 x float>* %p, align 16
+  %fptosi = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %x)
+  %sitofp = sitofp i64 %fptosi to float
+  ret float %sitofp
+}
+
+define float @float_to_int_to_float_reg_f32_i64(<4 x float> %x) {
+; SSE-LABEL: float_to_int_to_float_reg_f32_i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttss2si %xmm0, %rax
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: float_to_int_to_float_reg_f32_i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttss2si %xmm0, %rax
+; AVX-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %fptosi = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %x)
+  %sitofp = sitofp i64 %fptosi to float
+  ret float %sitofp
+}
+
+define double @float_to_int_to_float_mem_f64_i32(<2 x double>* %p) {
+; SSE-LABEL: float_to_int_to_float_mem_f64_i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttsd2si (%rdi), %eax
+; SSE-NEXT:    cvtsi2sdl %eax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: float_to_int_to_float_mem_f64_i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttsd2si (%rdi), %eax
+; AVX-NEXT:    vcvtsi2sdl %eax, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x = load <2 x double>, <2 x double>* %p, align 16
+  %fptosi = tail call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %x)
+  %sitofp = sitofp i32 %fptosi to double
+  ret double %sitofp
+}
+
+define double @float_to_int_to_float_reg_f64_i32(<2 x double> %x) {
+; SSE-LABEL: float_to_int_to_float_reg_f64_i32:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttsd2si %xmm0, %eax
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    cvtsi2sdl %eax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: float_to_int_to_float_reg_f64_i32:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttsd2si %xmm0, %eax
+; AVX-NEXT:    vcvtsi2sdl %eax, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %fptosi = tail call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %x)
+  %sitofp = sitofp i32 %fptosi to double
+  ret double %sitofp
+}
+
+define double @float_to_int_to_float_mem_f64_i64(<2 x double>* %p) {
+; SSE-LABEL: float_to_int_to_float_mem_f64_i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttsd2si (%rdi), %rax
+; SSE-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: float_to_int_to_float_mem_f64_i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttsd2si (%rdi), %rax
+; AVX-NEXT:    vcvtsi2sdq %rax, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %x = load <2 x double>, <2 x double>* %p, align 16
+  %fptosi = tail call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %x)
+  %sitofp = sitofp i64 %fptosi to double
+  ret double %sitofp
+}
+
+define double @float_to_int_to_float_reg_f64_i64(<2 x double> %x) {
+; SSE-LABEL: float_to_int_to_float_reg_f64_i64:
+; SSE:       # %bb.0:
+; SSE-NEXT:    cvttsd2si %xmm0, %rax
+; SSE-NEXT:    xorps %xmm0, %xmm0
+; SSE-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: float_to_int_to_float_reg_f64_i64:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vcvttsd2si %xmm0, %rax
+; AVX-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm0
+; AVX-NEXT:    retq
+  %fptosi = tail call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %x)
+  %sitofp = sitofp i64 %fptosi to double
+  ret double %sitofp
+}
 
 define <4 x float> @float_to_int_to_float_mem_v4f32(<4 x float>* %p) {
 ; SSE-LABEL: float_to_int_to_float_mem_v4f32:



