[llvm] r274846 - [X86][SSE] Improve constant folding tests for CVTSD/CVTSS/CVTTSD/CVTTSS

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 8 06:28:34 PDT 2016


Author: rksimon
Date: Fri Jul  8 08:28:34 2016
New Revision: 274846

URL: http://llvm.org/viewvc/llvm-project?rev=274846&view=rev
Log:
[X86][SSE] Improve constant folding tests for CVTSD/CVTSS/CVTTSD/CVTTSS

As discussed on D22106, improve the tests for constant folding of SSE scalar conversion intrinsics, to ensure we are correctly handling special and out-of-range cases

Modified:
    llvm/trunk/test/Transforms/ConstProp/calls.ll

Modified: llvm/trunk/test/Transforms/ConstProp/calls.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ConstProp/calls.ll?rev=274846&r1=274845&r2=274846&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ConstProp/calls.ll (original)
+++ llvm/trunk/test/Transforms/ConstProp/calls.ll Fri Jul  8 08:28:34 2016
@@ -176,27 +176,198 @@ define double @T() {
   ret double %d
 }
 
-define i1 @test_sse_cvt() nounwind readnone {
-; CHECK-LABEL: @test_sse_cvt(
+define i1 @test_sse_cvts_exact() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvts_exact(
+; CHECK-NOT: call
+; CHECK: ret i1 true
+entry:
+  %i0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> <float 3.0, float undef, float undef, float undef>) nounwind
+  %i1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> <float 3.0, float undef, float undef, float undef>) nounwind
+  %i2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> <double 7.0, double undef>) nounwind
+  %i3 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> <double 7.0, double undef>) nounwind
+  %sum02 = add i32 %i0, %i2
+  %sum13 = add i64 %i1, %i3
+  %cmp02 = icmp eq i32 %sum02, 10
+  %cmp13 = icmp eq i64 %sum13, 10
+  %b = and i1 %cmp02, %cmp13
+  ret i1 %b
+}
+
+; TODO: Inexact values should not fold as they are dependent on rounding mode
+define i1 @test_sse_cvts_inexact() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvts_inexact(
 ; CHECK-NOT: call
 ; CHECK: ret i1 true
 entry:
   %i0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> <float 1.75, float undef, float undef, float undef>) nounwind
-  %i1 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> <float 1.75, float undef, float undef, float undef>) nounwind
-  %i2 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> <float 1.75, float undef, float undef, float undef>) nounwind
-  %i3 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> <float 1.75, float undef, float undef, float undef>) nounwind
-  %i4 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> <double 1.75, double undef>) nounwind
-  %i5 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> <double 1.75, double undef>) nounwind
-  %i6 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> <double 1.75, double undef>) nounwind
-  %i7 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> <double 1.75, double undef>) nounwind
-  %sum11 = add i32 %i0, %i1
-  %sum12 = add i32 %i4, %i5
-  %sum1 = add i32 %sum11, %sum12
-  %sum21 = add i64 %i2, %i3
-  %sum22 = add i64 %i6, %i7
-  %sum2 = add i64 %sum21, %sum22
-  %sum1.sext = sext i32 %sum1 to i64
-  %b = icmp eq i64 %sum1.sext, %sum2
+  %i1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> <float 1.75, float undef, float undef, float undef>) nounwind
+  %i2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> <double 1.75, double undef>) nounwind
+  %i3 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> <double 1.75, double undef>) nounwind
+  %sum02 = add i32 %i0, %i2
+  %sum13 = add i64 %i1, %i3
+  %cmp02 = icmp eq i32 %sum02, 4
+  %cmp13 = icmp eq i64 %sum13, 4
+  %b = and i1 %cmp02, %cmp13
+  ret i1 %b
+}
+
+; FLT_MAX/DBL_MAX should not fold
+define i1 @test_sse_cvts_max() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvts_max(
+; CHECK: call
+; CHECK: call
+; CHECK: call
+; CHECK: call
+entry:
+  %fm = bitcast <4 x i32> <i32 2139095039, i32 undef, i32 undef, i32 undef> to <4 x float>
+  %dm = bitcast <2 x i64> <i64 9218868437227405311, i64 undef> to <2 x double>
+  %i0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> %fm) nounwind
+  %i1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %fm) nounwind
+  %i2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %dm) nounwind
+  %i3 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %dm) nounwind
+  %sum02 = add i32 %i0, %i2
+  %sum13 = add i64 %i1, %i3
+  %sum02.sext = sext i32 %sum02 to i64
+  %b = icmp eq i64 %sum02.sext, %sum13
+  ret i1 %b
+}
+
+; INF should not fold
+define i1 @test_sse_cvts_inf() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvts_inf(
+; CHECK: call
+; CHECK: call
+; CHECK: call
+; CHECK: call
+entry:
+  %fm = bitcast <4 x i32> <i32 2139095040, i32 undef, i32 undef, i32 undef> to <4 x float>
+  %dm = bitcast <2 x i64> <i64 9218868437227405312, i64 undef> to <2 x double>
+  %i0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> %fm) nounwind
+  %i1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %fm) nounwind
+  %i2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %dm) nounwind
+  %i3 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %dm) nounwind
+  %sum02 = add i32 %i0, %i2
+  %sum13 = add i64 %i1, %i3
+  %sum02.sext = sext i32 %sum02 to i64
+  %b = icmp eq i64 %sum02.sext, %sum13
+  ret i1 %b
+}
+
+; NAN should not fold
+define i1 @test_sse_cvts_nan() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvts_nan(
+; CHECK: call
+; CHECK: call
+; CHECK: call
+; CHECK: call
+entry:
+  %fm = bitcast <4 x i32> <i32 2143289344, i32 undef, i32 undef, i32 undef> to <4 x float>
+  %dm = bitcast <2 x i64> <i64 9221120237041090560, i64 undef> to <2 x double>
+  %i0 = tail call i32 @llvm.x86.sse.cvtss2si(<4 x float> %fm) nounwind
+  %i1 = tail call i64 @llvm.x86.sse.cvtss2si64(<4 x float> %fm) nounwind
+  %i2 = call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %dm) nounwind
+  %i3 = call i64 @llvm.x86.sse2.cvtsd2si64(<2 x double> %dm) nounwind
+  %sum02 = add i32 %i0, %i2
+  %sum13 = add i64 %i1, %i3
+  %sum02.sext = sext i32 %sum02 to i64
+  %b = icmp eq i64 %sum02.sext, %sum13
+  ret i1 %b
+}
+
+define i1 @test_sse_cvtts_exact() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvtts_exact(
+; CHECK-NOT: call
+; CHECK: ret i1 true
+entry:
+  %i0 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> <float 3.0, float undef, float undef, float undef>) nounwind
+  %i1 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> <float 3.0, float undef, float undef, float undef>) nounwind
+  %i2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> <double 7.0, double undef>) nounwind
+  %i3 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> <double 7.0, double undef>) nounwind
+  %sum02 = add i32 %i0, %i2
+  %sum13 = add i64 %i1, %i3
+  %cmp02 = icmp eq i32 %sum02, 10
+  %cmp13 = icmp eq i64 %sum13, 10
+  %b = and i1 %cmp02, %cmp13
+  ret i1 %b
+}
+
+define i1 @test_sse_cvtts_inexact() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvtts_inexact(
+; CHECK-NOT: call
+; CHECK: ret i1 true
+entry:
+  %i0 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> <float 1.75, float undef, float undef, float undef>) nounwind
+  %i1 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> <float 1.75, float undef, float undef, float undef>) nounwind
+  %i2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> <double 1.75, double undef>) nounwind
+  %i3 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> <double 1.75, double undef>) nounwind
+  %sum02 = add i32 %i0, %i2
+  %sum13 = add i64 %i1, %i3
+  %cmp02 = icmp eq i32 %sum02, 2
+  %cmp13 = icmp eq i64 %sum13, 2
+  %b = and i1 %cmp02, %cmp13
+  ret i1 %b
+}
+
+; FLT_MAX/DBL_MAX should not fold
+define i1 @test_sse_cvtts_max() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvtts_max(
+; CHECK: call
+; CHECK: call
+; CHECK: call
+; CHECK: call
+entry:
+  %fm = bitcast <4 x i32> <i32 2139095039, i32 undef, i32 undef, i32 undef> to <4 x float>
+  %dm = bitcast <2 x i64> <i64 9218868437227405311, i64 undef> to <2 x double>
+  %i0 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %fm) nounwind
+  %i1 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %fm) nounwind
+  %i2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %dm) nounwind
+  %i3 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %dm) nounwind
+  %sum02 = add i32 %i0, %i2
+  %sum13 = add i64 %i1, %i3
+  %sum02.sext = sext i32 %sum02 to i64
+  %b = icmp eq i64 %sum02.sext, %sum13
+  ret i1 %b
+}
+
+; INF should not fold
+define i1 @test_sse_cvtts_inf() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvtts_inf(
+; CHECK: call
+; CHECK: call
+; CHECK: call
+; CHECK: call
+entry:
+  %fm = bitcast <4 x i32> <i32 2139095040, i32 undef, i32 undef, i32 undef> to <4 x float>
+  %dm = bitcast <2 x i64> <i64 9218868437227405312, i64 undef> to <2 x double>
+  %i0 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %fm) nounwind
+  %i1 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %fm) nounwind
+  %i2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %dm) nounwind
+  %i3 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %dm) nounwind
+  %sum02 = add i32 %i0, %i2
+  %sum13 = add i64 %i1, %i3
+  %sum02.sext = sext i32 %sum02 to i64
+  %b = icmp eq i64 %sum02.sext, %sum13
+  ret i1 %b
+}
+
+; NAN should not fold
+define i1 @test_sse_cvtts_nan() nounwind readnone {
+; CHECK-LABEL: @test_sse_cvtts_nan(
+; CHECK: call
+; CHECK: call
+; CHECK: call
+; CHECK: call
+entry:
+  %fm = bitcast <4 x i32> <i32 2143289344, i32 undef, i32 undef, i32 undef> to <4 x float>
+  %dm = bitcast <2 x i64> <i64 9221120237041090560, i64 undef> to <2 x double>
+  %i0 = tail call i32 @llvm.x86.sse.cvttss2si(<4 x float> %fm) nounwind
+  %i1 = tail call i64 @llvm.x86.sse.cvttss2si64(<4 x float> %fm) nounwind
+  %i2 = call i32 @llvm.x86.sse2.cvttsd2si(<2 x double> %dm) nounwind
+  %i3 = call i64 @llvm.x86.sse2.cvttsd2si64(<2 x double> %dm) nounwind
+  %sum02 = add i32 %i0, %i2
+  %sum13 = add i64 %i1, %i3
+  %sum02.sext = sext i32 %sum02 to i64
+  %b = icmp eq i64 %sum02.sext, %sum13
   ret i1 %b
 }
 




More information about the llvm-commits mailing list