[llvm] r197615 - [aarch32] fix bug 18268: Incorrect condition of vsel

Weiming Zhao weimingz at codeaurora.org
Wed Dec 18 14:25:17 PST 2013


Author: weimingz
Date: Wed Dec 18 16:25:17 2013
New Revision: 197615

URL: http://llvm.org/viewvc/llvm-project?rev=197615&view=rev
Log:
[aarch32] fix bug 18268: Incorrect condition of vsel

Given vsel_cc op1, op2: VSEL has no LE/LT condition, so to generate a VSEL
for such a selection we need to invert cc and swap op1 and op2. Inverting cc
means flipping both the L/G and the E bits (e.g. LT becomes GE, not GT), which
is what ISD::getSetCCInverse does; the previous code used
ISD::getSetCCSwappedOperands, which only exchanges L and G (see the sketch
below).
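
For illustration only, a standalone sketch (not LLVM code) that models a
signed condition code as L/G/E bit flags, showing why the swapped-operands
form differs from the logical inverse. The SETLT/SETGE names mirror
ISD::CondCode, but the helpers here are hypothetical stand-ins for the real
DAG utilities.

#include <cassert>

// Model a signed condition code as a mask of L (less), G (greater) and
// E (equal) bits.
enum CCBits { L = 1, G = 2, E = 4 };

// Condition for swapped *compare* operands: a < b  <=>  b > a.
// L and G exchange, E is unchanged (the getSetCCSwappedOperands behavior).
static unsigned swappedOperands(unsigned CC) {
  unsigned R = CC & E;
  if (CC & L) R |= G;
  if (CC & G) R |= L;
  return R;
}

// Logical inverse: !(a < b) <=> a >= b. Both the L/G and the E bits flip
// (the getSetCCInverse behavior for integer compares).
static unsigned inverse(unsigned CC) { return ~CC & (L | G | E); }

int main() {
  const unsigned SETLT = L, SETLE = L | E, SETGT = G, SETGE = G | E;
  assert(swappedOperands(SETLT) == SETGT); // old code: wrong VSEL condition
  assert(inverse(SETLT) == SETGE);         // new code: LT inverts to GE
  assert(swappedOperands(SETLE) == SETGE);
  assert(inverse(SETLE) == SETGT);         // LE inverts to GT
  return 0;
}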


Modified:
    llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
    llvm/trunk/test/CodeGen/ARM/vsel.ll

Modified: llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp?rev=197615&r1=197614&r2=197615&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp Wed Dec 18 16:25:17 2013
@@ -3232,7 +3232,7 @@ SDValue ARMTargetLowering::LowerSELECT(S
 static ISD::CondCode getInverseCCForVSEL(ISD::CondCode CC) {
   if (CC == ISD::SETNE)
     return ISD::SETEQ;
-  return ISD::getSetCCSwappedOperands(CC);
+  return ISD::getSetCCInverse(CC, true);
 }
 
 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
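
For context, this is how the helper reads after the change, with explanatory
comments added for this message only (they are not part of the commit):

static ISD::CondCode getInverseCCForVSEL(ISD::CondCode CC) {
  if (CC == ISD::SETNE)
    return ISD::SETEQ;
  // Return the logical inverse of CC (flips both the L/G and the E bits)
  // instead of the condition for swapped compare operands; the lowering code
  // swaps the select operands op1/op2 separately.
  return ISD::getSetCCInverse(CC, true);
}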

Modified: llvm/trunk/test/CodeGen/ARM/vsel.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/vsel.ll?rev=197615&r1=197614&r2=197615&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/vsel.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/vsel.ll Wed Dec 18 16:25:17 2013
@@ -61,7 +61,7 @@ define void @test_vsel32slt(i32 %lhs32,
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, float* @varfloat
 ; CHECK: cmp r0, r1
-; CHECK: vselgt.f32 s0, s1, s0
+; CHECK: vselge.f32 s0, s1, s0
   ret void
 }
 define void @test_vsel64slt(i32 %lhs32, i32 %rhs32, double %a, double %b) {
@@ -70,7 +70,7 @@ define void @test_vsel64slt(i32 %lhs32,
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, double* @vardouble
 ; CHECK: cmp r0, r1
-; CHECK: vselgt.f64 d16, d1, d0
+; CHECK: vselge.f64 d16, d1, d0
   ret void
 }
 define void @test_vsel32sle(i32 %lhs32, i32 %rhs32, float %a, float %b) {
@@ -79,7 +79,7 @@ define void @test_vsel32sle(i32 %lhs32,
   %val1 = select i1 %tst1, float %a, float %b
   store float %val1, float* @varfloat
 ; CHECK: cmp r0, r1
-; CHECK: vselge.f32 s0, s1, s0
+; CHECK: vselgt.f32 s0, s1, s0
   ret void
 }
 define void @test_vsel64sle(i32 %lhs32, i32 %rhs32, double %a, double %b) {
@@ -88,7 +88,7 @@ define void @test_vsel64sle(i32 %lhs32,
   %val1 = select i1 %tst1, double %a, double %b
   store double %val1, double* @vardouble
 ; CHECK: cmp r0, r1
-; CHECK: vselge.f64 d16, d1, d0
+; CHECK: vselgt.f64 d16, d1, d0
   ret void
 }
 define void @test_vsel32ogt(float %lhs32, float %rhs32, float %a, float %b) {
