[PATCH] R600/SI: Add i64 cmp instructions

Tom Stellard tom at stellard.net
Fri Nov 22 15:17:16 PST 2013


On Fri, Nov 22, 2013 at 02:03:21PM -0800, Matt Arsenault wrote:
> I'm not sure what's going on with V_CMP_NE. I don't see it in the ISA manual (F, LT, EQ, LE, GT, LG, GE, T are listed, where I don't know what T or F are. Always true and always false?), but it was already listed here (but without a pattern like the others), so I think it's supposed to be CMP_EQ with the negate modifier bit.
> 

In the manual, V_CMP_NE is called V_CMP_NEQ; it is listed with the
other N* conditions.  V_CMP_T always returns 1 and V_CMP_F always
returns 0.

Can you add -verify-machineinstrs to the testcase?  With that change,
this patch LGTM.

-Tom

> http://llvm-reviews.chandlerc.com/D2256
> 
> Files:
>   lib/Target/R600/AMDGPUInstructions.td
>   lib/Target/R600/SIInstructions.td
>   test/CodeGen/R600/icmp64.ll

> Index: lib/Target/R600/AMDGPUInstructions.td
> ===================================================================
> --- lib/Target/R600/AMDGPUInstructions.td
> +++ lib/Target/R600/AMDGPUInstructions.td
> @@ -68,6 +68,16 @@
>                       case ISD::SETGT: return true;}}}]
>  >;
>  
> +def COND_SGT : PatLeaf <
> +  (cond),
> +  [{return N->get() == ISD::SETGT;}]
> +>;
> +
> +def COND_UGT : PatLeaf <
> +  (cond),
> +  [{return N->get() == ISD::SETUGT;}]
> +>;
> +
>  def COND_OGT : PatLeaf <
>    (cond),
>    [{return N->get() == ISD::SETOGT || N->get() == ISD::SETGT;}]
> @@ -80,6 +90,16 @@
>                       case ISD::SETGE: return true;}}}]
>  >;
>  
> +def COND_SGE : PatLeaf <
> +  (cond),
> +  [{return N->get() == ISD::SETGE;}]
> +>;
> +
> +def COND_UGE : PatLeaf <
> +  (cond),
> +  [{return N->get() == ISD::SETUGE;}]
> +>;
> +
>  def COND_OGE : PatLeaf <
>    (cond),
>    [{return N->get() == ISD::SETOGE || N->get() == ISD::SETGE;}]
> @@ -92,6 +112,16 @@
>                       case ISD::SETLT: return true;}}}]
>  >;
>  
> +def COND_SLT : PatLeaf <
> +  (cond),
> +  [{return N->get() == ISD::SETLT;}]
> +>;
> +
> +def COND_ULT : PatLeaf <
> +  (cond),
> +  [{return N->get() == ISD::SETULT;}]
> +>;
> +
>  def COND_LE : PatLeaf <
>    (cond),
>    [{switch(N->get()){{default: return false;
> @@ -99,6 +129,16 @@
>                       case ISD::SETLE: return true;}}}]
>  >;
>  
> +def COND_SLE : PatLeaf <
> +  (cond),
> +  [{return N->get() == ISD::SETLE;}]
> +>;
> +
> +def COND_ULE : PatLeaf <
> +  (cond),
> +  [{return N->get() == ISD::SETULE;}]
> +>;
> +
>  def COND_NULL : PatLeaf <
>    (cond),
>    [{return false;}]
> Index: lib/Target/R600/SIInstructions.td
> ===================================================================
> --- lib/Target/R600/SIInstructions.td
> +++ lib/Target/R600/SIInstructions.td
> @@ -317,12 +317,12 @@
>  } // End hasSideEffects = 1, Defs = [EXEC]
>  
>  defm V_CMP_F_I64 : VOPC_64 <0x000000a0, "V_CMP_F_I64">;
> -defm V_CMP_LT_I64 : VOPC_64 <0x000000a1, "V_CMP_LT_I64">;
> -defm V_CMP_EQ_I64 : VOPC_64 <0x000000a2, "V_CMP_EQ_I64">;
> -defm V_CMP_LE_I64 : VOPC_64 <0x000000a3, "V_CMP_LE_I64">;
> -defm V_CMP_GT_I64 : VOPC_64 <0x000000a4, "V_CMP_GT_I64">;
> -defm V_CMP_NE_I64 : VOPC_64 <0x000000a5, "V_CMP_NE_I64">;
> -defm V_CMP_GE_I64 : VOPC_64 <0x000000a6, "V_CMP_GE_I64">;
> +defm V_CMP_LT_I64 : VOPC_64 <0x000000a1, "V_CMP_LT_I64", i64, COND_SLT>;
> +defm V_CMP_EQ_I64 : VOPC_64 <0x000000a2, "V_CMP_EQ_I64", i64, COND_EQ>;
> +defm V_CMP_LE_I64 : VOPC_64 <0x000000a3, "V_CMP_LE_I64", i64, COND_SLE>;
> +defm V_CMP_GT_I64 : VOPC_64 <0x000000a4, "V_CMP_GT_I64", i64, COND_SGT>;
> +defm V_CMP_NE_I64 : VOPC_64 <0x000000a5, "V_CMP_NE_I64", i64, COND_NE>;
> +defm V_CMP_GE_I64 : VOPC_64 <0x000000a6, "V_CMP_GE_I64", i64, COND_SGE>;
>  defm V_CMP_T_I64 : VOPC_64 <0x000000a7, "V_CMP_T_I64">;
>  
>  let hasSideEffects = 1, Defs = [EXEC] in {
> @@ -361,12 +361,12 @@
>  } // End hasSideEffects = 1, Defs = [EXEC]
>  
>  defm V_CMP_F_U64 : VOPC_64 <0x000000e0, "V_CMP_F_U64">;
> -defm V_CMP_LT_U64 : VOPC_64 <0x000000e1, "V_CMP_LT_U64">;
> -defm V_CMP_EQ_U64 : VOPC_64 <0x000000e2, "V_CMP_EQ_U64">;
> -defm V_CMP_LE_U64 : VOPC_64 <0x000000e3, "V_CMP_LE_U64">;
> -defm V_CMP_GT_U64 : VOPC_64 <0x000000e4, "V_CMP_GT_U64">;
> -defm V_CMP_NE_U64 : VOPC_64 <0x000000e5, "V_CMP_NE_U64">;
> -defm V_CMP_GE_U64 : VOPC_64 <0x000000e6, "V_CMP_GE_U64">;
> +defm V_CMP_LT_U64 : VOPC_64 <0x000000e1, "V_CMP_LT_U64", i64, COND_ULT>;
> +defm V_CMP_EQ_U64 : VOPC_64 <0x000000e2, "V_CMP_EQ_U64", i64, COND_EQ>; // XXX - Why is there an unsigned version?
> +defm V_CMP_LE_U64 : VOPC_64 <0x000000e3, "V_CMP_LE_U64", i64, COND_ULE>;
> +defm V_CMP_GT_U64 : VOPC_64 <0x000000e4, "V_CMP_GT_U64", i64, COND_UGT>;
> +defm V_CMP_NE_U64 : VOPC_64 <0x000000e5, "V_CMP_NE_U64", i64, COND_NE>;
> +defm V_CMP_GE_U64 : VOPC_64 <0x000000e6, "V_CMP_GE_U64", i64, COND_UGE>;
>  defm V_CMP_T_U64 : VOPC_64 <0x000000e7, "V_CMP_T_U64">;
>  
>  let hasSideEffects = 1, Defs = [EXEC] in {
> Index: test/CodeGen/R600/icmp64.ll
> ===================================================================
> --- /dev/null
> +++ test/CodeGen/R600/icmp64.ll
> @@ -0,0 +1,93 @@
> +; RUN: llc -march=r600 -mcpu=SI < %s | FileCheck -check-prefix=SI %s
> +
> +; SI-LABEL: @test_i64_eq:
> +; SI: V_CMP_EQ_I64
> +define void @test_i64_eq(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
> +  %cmp = icmp eq i64 %a, %b
> +  %result = sext i1 %cmp to i32
> +  store i32 %result, i32 addrspace(1)* %out, align 4
> +  ret void
> +}
> +
> +; XSI-LABEL: @test_i64_ne:
> +; XSI: V_CMP_EQ_I64XXX
> +; FIXME: How to check for negate?
> +; define void @test_i64_ne(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
> +;   %cmp = icmp ne i64 %a, %b
> +;   %result = sext i1 %cmp to i32
> +;   store i32 %result, i32 addrspace(1)* %out, align 4
> +;   ret void
> +; }
> +
> +; SI-LABEL: @test_i64_slt:
> +; SI: V_CMP_LT_I64
> +define void @test_i64_slt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
> +  %cmp = icmp slt i64 %a, %b
> +  %result = sext i1 %cmp to i32
> +  store i32 %result, i32 addrspace(1)* %out, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @test_i64_ult:
> +; SI: V_CMP_LT_U64
> +define void @test_i64_ult(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
> +  %cmp = icmp ult i64 %a, %b
> +  %result = sext i1 %cmp to i32
> +  store i32 %result, i32 addrspace(1)* %out, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @test_i64_sle:
> +; SI: V_CMP_LE_I64
> +define void @test_i64_sle(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
> +  %cmp = icmp sle i64 %a, %b
> +  %result = sext i1 %cmp to i32
> +  store i32 %result, i32 addrspace(1)* %out, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @test_i64_ule:
> +; SI: V_CMP_LE_U64
> +define void @test_i64_ule(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
> +  %cmp = icmp ule i64 %a, %b
> +  %result = sext i1 %cmp to i32
> +  store i32 %result, i32 addrspace(1)* %out, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @test_i64_sgt:
> +; SI: V_CMP_GT_I64
> +define void @test_i64_sgt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
> +  %cmp = icmp sgt i64 %a, %b
> +  %result = sext i1 %cmp to i32
> +  store i32 %result, i32 addrspace(1)* %out, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @test_i64_ugt:
> +; SI: V_CMP_GT_U64
> +define void @test_i64_ugt(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
> +  %cmp = icmp ugt i64 %a, %b
> +  %result = sext i1 %cmp to i32
> +  store i32 %result, i32 addrspace(1)* %out, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @test_i64_sge:
> +; SI: V_CMP_GE_I64
> +define void @test_i64_sge(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
> +  %cmp = icmp sge i64 %a, %b
> +  %result = sext i1 %cmp to i32
> +  store i32 %result, i32 addrspace(1)* %out, align 4
> +  ret void
> +}
> +
> +; SI-LABEL: @test_i64_uge:
> +; SI: V_CMP_GE_U64
> +define void @test_i64_uge(i32 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
> +  %cmp = icmp uge i64 %a, %b
> +  %result = sext i1 %cmp to i32
> +  store i32 %result, i32 addrspace(1)* %out, align 4
> +  ret void
> +}
> +

> _______________________________________________
> llvm-commits mailing list
> llvm-commits at cs.uiuc.edu
> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits




More information about the llvm-commits mailing list