[llvm] r290306 - AMDGPU: setcc test cleanup

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 21 19:21:46 PST 2016


Author: arsenm
Date: Wed Dec 21 21:21:45 2016
New Revision: 290306

URL: http://llvm.org/viewvc/llvm-project?rev=290306&view=rev
Log:
AMDGPU: setcc test cleanup

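For context, a minimal sketch of the test convention the diff below moves both files toward: the amdgcn RUN line is checked against a shared GCN prefix (rather than SI), and each kernel is tagged with a #0 attribute group carrying nounwind. The function name and check lines here are illustrative only and are not part of the commit:

; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s

; FUNC-LABEL: {{^}}i32_eq_example:
; GCN: v_cmp_eq_u32
define void @i32_eq_example(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
entry:
  %cmp = icmp eq i32 %a, %b
  %ext = sext i1 %cmp to i32
  store i32 %ext, i32 addrspace(1)* %out
  ret void
}

attributes #0 = { nounwind }
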
Modified:
    llvm/trunk/test/CodeGen/AMDGPU/setcc.ll
    llvm/trunk/test/CodeGen/AMDGPU/setcc64.ll

Modified: llvm/trunk/test/CodeGen/AMDGPU/setcc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/setcc.ll?rev=290306&r1=290305&r2=290306&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/setcc.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/setcc.ll Wed Dec 21 21:21:45 2016
@@ -1,5 +1,5 @@
-; RUN: llc < %s -march=amdgcn -verify-machineinstrs | FileCheck -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck --check-prefix=R600 --check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=FUNC %s
+; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=R600 -check-prefix=FUNC %s
 
 declare i32 @llvm.r600.read.tidig.x() nounwind readnone
 
@@ -7,7 +7,9 @@ declare i32 @llvm.r600.read.tidig.x() no
 ; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW]}}, KC0[3].X, KC0[3].Z
 ; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW]}}, KC0[2].W, KC0[3].Y
 
-define void @setcc_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) {
+; GCN-DAG: v_cmp_eq_u32_e32
+; GCN-DAG: v_cmp_eq_u32_e64
+define void @setcc_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, <2 x i32> %b) #0 {
   %result = icmp eq <2 x i32> %a, %b
   %sext = sext <2 x i1> %result to <2 x i32>
   store <2 x i32> %sext, <2 x i32> addrspace(1)* %out
@@ -20,10 +22,14 @@ define void @setcc_v2i32(<2 x i32> addrs
 ; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 ; R600-DAG: SETE_INT * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 
-define void @setcc_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
+; GCN: v_cmp_eq_u32_e32
+; GCN: v_cmp_eq_u32_e64
+; GCN: v_cmp_eq_u32_e64
+; GCN: v_cmp_eq_u32_e64
+define void @setcc_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) #0 {
   %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
-  %a = load <4 x i32>, <4 x i32> addrspace(1) * %in
-  %b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
+  %a = load <4 x i32>, <4 x i32> addrspace(1)* %in
+  %b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
   %result = icmp eq <4 x i32> %a, %b
   %sext = sext <4 x i1> %result to <4 x i32>
   store <4 x i32> %sext, <4 x i32> addrspace(1)* %out
@@ -36,8 +42,8 @@ define void @setcc_v4i32(<4 x i32> addrs
 
 ; FUNC-LABEL: {{^}}f32_oeq:
 ; R600: SETE_DX10
-; SI: v_cmp_eq_f32
-define void @f32_oeq(i32 addrspace(1)* %out, float %a, float %b) {
+; GCN: v_cmp_eq_f32
+define void @f32_oeq(i32 addrspace(1)* %out, float %a, float %b) #0 {
 entry:
   %0 = fcmp oeq float %a, %b
   %1 = sext i1 %0 to i32
@@ -47,8 +53,8 @@ entry:
 
 ; FUNC-LABEL: {{^}}f32_ogt:
 ; R600: SETGT_DX10
-; SI: v_cmp_gt_f32
-define void @f32_ogt(i32 addrspace(1)* %out, float %a, float %b) {
+; GCN: v_cmp_gt_f32
+define void @f32_ogt(i32 addrspace(1)* %out, float %a, float %b) #0 {
 entry:
   %0 = fcmp ogt float %a, %b
   %1 = sext i1 %0 to i32
@@ -58,8 +64,8 @@ entry:
 
 ; FUNC-LABEL: {{^}}f32_oge:
 ; R600: SETGE_DX10
-; SI: v_cmp_ge_f32
-define void @f32_oge(i32 addrspace(1)* %out, float %a, float %b) {
+; GCN: v_cmp_ge_f32
+define void @f32_oge(i32 addrspace(1)* %out, float %a, float %b) #0 {
 entry:
   %0 = fcmp oge float %a, %b
   %1 = sext i1 %0 to i32
@@ -69,8 +75,8 @@ entry:
 
 ; FUNC-LABEL: {{^}}f32_olt:
 ; R600: SETGT_DX10
-; SI: v_cmp_lt_f32
-define void @f32_olt(i32 addrspace(1)* %out, float %a, float %b) {
+; GCN: v_cmp_lt_f32
+define void @f32_olt(i32 addrspace(1)* %out, float %a, float %b) #0 {
 entry:
   %0 = fcmp olt float %a, %b
   %1 = sext i1 %0 to i32
@@ -80,8 +86,8 @@ entry:
 
 ; FUNC-LABEL: {{^}}f32_ole:
 ; R600: SETGE_DX10
-; SI: v_cmp_le_f32
-define void @f32_ole(i32 addrspace(1)* %out, float %a, float %b) {
+; GCN: v_cmp_le_f32
+define void @f32_ole(i32 addrspace(1)* %out, float %a, float %b) #0 {
 entry:
   %0 = fcmp ole float %a, %b
   %1 = sext i1 %0 to i32
@@ -97,9 +103,9 @@ entry:
 ; R600-DAG: AND_INT
 ; R600-DAG: SETNE_INT
 
-; SI: v_cmp_lg_f32_e32 vcc
-; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f32_one(i32 addrspace(1)* %out, float %a, float %b) {
+; GCN: v_cmp_lg_f32_e32 vcc
+; GCN-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
+define void @f32_one(i32 addrspace(1)* %out, float %a, float %b) #0 {
 entry:
   %0 = fcmp one float %a, %b
   %1 = sext i1 %0 to i32
@@ -112,8 +118,8 @@ entry:
 ; R600-DAG: SETE_DX10
 ; R600-DAG: AND_INT
 ; R600-DAG: SETNE_INT
-; SI: v_cmp_o_f32
-define void @f32_ord(i32 addrspace(1)* %out, float %a, float %b) {
+; GCN: v_cmp_o_f32
+define void @f32_ord(i32 addrspace(1)* %out, float %a, float %b) #0 {
 entry:
   %0 = fcmp ord float %a, %b
   %1 = sext i1 %0 to i32
@@ -129,9 +135,9 @@ entry:
 ; R600-DAG: OR_INT
 ; R600-DAG: SETNE_INT
 
-; SI: v_cmp_nlg_f32_e32 vcc
-; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f32_ueq(i32 addrspace(1)* %out, float %a, float %b) {
+; GCN: v_cmp_nlg_f32_e32 vcc
+; GCN-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
+define void @f32_ueq(i32 addrspace(1)* %out, float %a, float %b) #0 {
 entry:
   %0 = fcmp ueq float %a, %b
   %1 = sext i1 %0 to i32
@@ -142,9 +148,9 @@ entry:
 ; FUNC-LABEL: {{^}}f32_ugt:
 ; R600: SETGE
 ; R600: SETE_DX10
-; SI: v_cmp_nle_f32_e32 vcc
-; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f32_ugt(i32 addrspace(1)* %out, float %a, float %b) {
+; GCN: v_cmp_nle_f32_e32 vcc
+; GCN-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
+define void @f32_ugt(i32 addrspace(1)* %out, float %a, float %b) #0 {
 entry:
   %0 = fcmp ugt float %a, %b
   %1 = sext i1 %0 to i32
@@ -156,9 +162,9 @@ entry:
 ; R600: SETGT
 ; R600: SETE_DX10
 
-; SI: v_cmp_nlt_f32_e32 vcc
-; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f32_uge(i32 addrspace(1)* %out, float %a, float %b) {
+; GCN: v_cmp_nlt_f32_e32 vcc
+; GCN-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
+define void @f32_uge(i32 addrspace(1)* %out, float %a, float %b) #0 {
 entry:
   %0 = fcmp uge float %a, %b
   %1 = sext i1 %0 to i32
@@ -170,9 +176,9 @@ entry:
 ; R600: SETGE
 ; R600: SETE_DX10
 
-; SI: v_cmp_nge_f32_e32 vcc
-; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f32_ult(i32 addrspace(1)* %out, float %a, float %b) {
+; GCN: v_cmp_nge_f32_e32 vcc
+; GCN-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
+define void @f32_ult(i32 addrspace(1)* %out, float %a, float %b) #0 {
 entry:
   %0 = fcmp ult float %a, %b
   %1 = sext i1 %0 to i32
@@ -184,9 +190,9 @@ entry:
 ; R600: SETGT
 ; R600: SETE_DX10
 
-; SI: v_cmp_ngt_f32_e32 vcc
-; SI-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f32_ule(i32 addrspace(1)* %out, float %a, float %b) {
+; GCN: v_cmp_ngt_f32_e32 vcc
+; GCN-NEXT: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
+define void @f32_ule(i32 addrspace(1)* %out, float %a, float %b) #0 {
 entry:
   %0 = fcmp ule float %a, %b
   %1 = sext i1 %0 to i32
@@ -196,8 +202,8 @@ entry:
 
 ; FUNC-LABEL: {{^}}f32_une:
 ; R600: SETNE_DX10
-; SI: v_cmp_neq_f32
-define void @f32_une(i32 addrspace(1)* %out, float %a, float %b) {
+; GCN: v_cmp_neq_f32
+define void @f32_une(i32 addrspace(1)* %out, float %a, float %b) #0 {
 entry:
   %0 = fcmp une float %a, %b
   %1 = sext i1 %0 to i32
@@ -210,8 +216,8 @@ entry:
 ; R600: SETNE_DX10
 ; R600: OR_INT
 ; R600: SETNE_INT
-; SI: v_cmp_u_f32
-define void @f32_uno(i32 addrspace(1)* %out, float %a, float %b) {
+; GCN: v_cmp_u_f32
+define void @f32_uno(i32 addrspace(1)* %out, float %a, float %b) #0 {
 entry:
   %0 = fcmp uno float %a, %b
   %1 = sext i1 %0 to i32
@@ -225,8 +231,8 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_eq:
 ; R600: SETE_INT
-; SI: v_cmp_eq_u32
-define void @i32_eq(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+; GCN: v_cmp_eq_u32
+define void @i32_eq(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp eq i32 %a, %b
   %1 = sext i1 %0 to i32
@@ -236,8 +242,8 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_ne:
 ; R600: SETNE_INT
-; SI: v_cmp_ne_u32
-define void @i32_ne(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+; GCN: v_cmp_ne_u32
+define void @i32_ne(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp ne i32 %a, %b
   %1 = sext i1 %0 to i32
@@ -247,8 +253,8 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_ugt:
 ; R600: SETGT_UINT
-; SI: v_cmp_gt_u32
-define void @i32_ugt(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+; GCN: v_cmp_gt_u32
+define void @i32_ugt(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp ugt i32 %a, %b
   %1 = sext i1 %0 to i32
@@ -258,8 +264,8 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_uge:
 ; R600: SETGE_UINT
-; SI: v_cmp_ge_u32
-define void @i32_uge(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+; GCN: v_cmp_ge_u32
+define void @i32_uge(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp uge i32 %a, %b
   %1 = sext i1 %0 to i32
@@ -269,8 +275,8 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_ult:
 ; R600: SETGT_UINT
-; SI: v_cmp_lt_u32
-define void @i32_ult(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+; GCN: v_cmp_lt_u32
+define void @i32_ult(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp ult i32 %a, %b
   %1 = sext i1 %0 to i32
@@ -280,8 +286,8 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_ule:
 ; R600: SETGE_UINT
-; SI: v_cmp_le_u32
-define void @i32_ule(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+; GCN: v_cmp_le_u32
+define void @i32_ule(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp ule i32 %a, %b
   %1 = sext i1 %0 to i32
@@ -291,8 +297,8 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_sgt:
 ; R600: SETGT_INT
-; SI: v_cmp_gt_i32
-define void @i32_sgt(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+; GCN: v_cmp_gt_i32
+define void @i32_sgt(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp sgt i32 %a, %b
   %1 = sext i1 %0 to i32
@@ -302,8 +308,8 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_sge:
 ; R600: SETGE_INT
-; SI: v_cmp_ge_i32
-define void @i32_sge(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+; GCN: v_cmp_ge_i32
+define void @i32_sge(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp sge i32 %a, %b
   %1 = sext i1 %0 to i32
@@ -313,8 +319,8 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_slt:
 ; R600: SETGT_INT
-; SI: v_cmp_lt_i32
-define void @i32_slt(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+; GCN: v_cmp_lt_i32
+define void @i32_slt(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp slt i32 %a, %b
   %1 = sext i1 %0 to i32
@@ -324,8 +330,8 @@ entry:
 
 ; FUNC-LABEL: {{^}}i32_sle:
 ; R600: SETGE_INT
-; SI: v_cmp_le_i32
-define void @i32_sle(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+; GCN: v_cmp_le_i32
+define void @i32_sle(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
 entry:
   %0 = icmp sle i32 %a, %b
   %1 = sext i1 %0 to i32
@@ -335,14 +341,14 @@ entry:
 
 ; FIXME: This does 4 compares
 ; FUNC-LABEL: {{^}}v3i32_eq:
-; SI-DAG: v_cmp_eq_u32
-; SI-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
-; SI-DAG: v_cmp_eq_u32
-; SI-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
-; SI-DAG: v_cmp_eq_u32
-; SI-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
-; SI: s_endpgm
-define void @v3i32_eq(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %ptra, <3 x i32> addrspace(1)* %ptrb) {
+; GCN-DAG: v_cmp_eq_u32
+; GCN-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
+; GCN-DAG: v_cmp_eq_u32
+; GCN-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
+; GCN-DAG: v_cmp_eq_u32
+; GCN-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
+; GCN: s_endpgm
+define void @v3i32_eq(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %ptra, <3 x i32> addrspace(1)* %ptrb) #0 {
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep.a = getelementptr <3 x i32>, <3 x i32> addrspace(1)* %ptra, i32 %tid
   %gep.b = getelementptr <3 x i32>, <3 x i32> addrspace(1)* %ptrb, i32 %tid
@@ -356,14 +362,14 @@ define void @v3i32_eq(<3 x i32> addrspac
 }
 
 ; FUNC-LABEL: {{^}}v3i8_eq:
-; SI-DAG: v_cmp_eq_u32
-; SI-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
-; SI-DAG: v_cmp_eq_u32
-; SI-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
-; SI-DAG: v_cmp_eq_u32
-; SI-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
-; SI: s_endpgm
-define void @v3i8_eq(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %ptra, <3 x i8> addrspace(1)* %ptrb) {
+; GCN-DAG: v_cmp_eq_u32
+; GCN-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
+; GCN-DAG: v_cmp_eq_u32
+; GCN-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
+; GCN-DAG: v_cmp_eq_u32
+; GCN-DAG: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1,
+; GCN: s_endpgm
+define void @v3i8_eq(<3 x i8> addrspace(1)* %out, <3 x i8> addrspace(1)* %ptra, <3 x i8> addrspace(1)* %ptrb) #0 {
   %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone
   %gep.a = getelementptr <3 x i8>, <3 x i8> addrspace(1)* %ptra, i32 %tid
   %gep.b = getelementptr <3 x i8>, <3 x i8> addrspace(1)* %ptrb, i32 %tid
@@ -378,9 +384,9 @@ define void @v3i8_eq(<3 x i8> addrspace(
 
 ; Make sure we don't try to emit i1 setcc ops
 ; FUNC-LABEL: setcc-i1
-; SI: s_and_b32 [[AND:s[0-9]+]], s{{[0-9]+}}, 1
-; SI: s_cmp_eq_u32 [[AND]], 0
-define void @setcc-i1(i32 %in) {
+; GCN: s_and_b32 [[AND:s[0-9]+]], s{{[0-9]+}}, 1
+; GCN: s_cmp_eq_u32 [[AND]], 0
+define void @setcc-i1(i32 %in) #0 {
   %and = and i32 %in, 1
   %cmp = icmp eq i32 %and, 0
   br i1 %cmp, label %endif, label %if
@@ -391,9 +397,9 @@ endif:
 }
 
 ; FUNC-LABEL: setcc-i1-and-xor
-; SI-DAG: v_cmp_ge_f32_e64 [[A:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 0{{$}}
-; SI-DAG: v_cmp_le_f32_e64 [[B:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 1.0
-; SI: s_and_b64 s[2:3], [[A]], [[B]]
+; GCN-DAG: v_cmp_ge_f32_e64 [[A:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 0{{$}}
+; GCN-DAG: v_cmp_le_f32_e64 [[B:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 1.0
+; GCN: s_and_b64 s[2:3], [[A]], [[B]]
 define void @setcc-i1-and-xor(i32 addrspace(1)* %out, float %cond) #0 {
 bb0:
   %tmp5 = fcmp oge float %cond, 0.000000e+00
@@ -409,3 +415,5 @@ bb1:
 bb2:
   ret void
 }
+
+attributes #0 = { nounwind }

Modified: llvm/trunk/test/CodeGen/AMDGPU/setcc64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/setcc64.ll?rev=290306&r1=290305&r2=290306&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/setcc64.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/setcc64.ll Wed Dec 21 21:21:45 2016
@@ -1,5 +1,5 @@
-;RUN: llc < %s -march=amdgcn -mcpu=SI -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
-;RUN: llc < %s -march=amdgcn -mcpu=tonga -verify-machineinstrs| FileCheck --check-prefix=SI --check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
 
 ; XXX: Merge this into setcc, once R600 supports 64-bit operations
 
@@ -7,150 +7,150 @@
 ;; Double comparisons
 ;;;==========================================================================;;;
 
-; FUNC-LABEL: {{^}}f64_oeq:
-; SI: v_cmp_eq_f64
-define void @f64_oeq(i32 addrspace(1)* %out, double %a, double %b) {
+; GCN-LABEL: {{^}}f64_oeq:
+; GCN: v_cmp_eq_f64
+define void @f64_oeq(i32 addrspace(1)* %out, double %a, double %b) #0 {
 entry:
-  %0 = fcmp oeq double %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = fcmp oeq double %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}f64_ogt:
-; SI: v_cmp_gt_f64
-define void @f64_ogt(i32 addrspace(1)* %out, double %a, double %b) {
+; GCN-LABEL: {{^}}f64_ogt:
+; GCN: v_cmp_gt_f64
+define void @f64_ogt(i32 addrspace(1)* %out, double %a, double %b) #0 {
 entry:
-  %0 = fcmp ogt double %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = fcmp ogt double %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}f64_oge:
-; SI: v_cmp_ge_f64
-define void @f64_oge(i32 addrspace(1)* %out, double %a, double %b) {
+; GCN-LABEL: {{^}}f64_oge:
+; GCN: v_cmp_ge_f64
+define void @f64_oge(i32 addrspace(1)* %out, double %a, double %b) #0 {
 entry:
-  %0 = fcmp oge double %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = fcmp oge double %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}f64_olt:
-; SI: v_cmp_lt_f64
-define void @f64_olt(i32 addrspace(1)* %out, double %a, double %b) {
+; GCN-LABEL: {{^}}f64_olt:
+; GCN: v_cmp_lt_f64
+define void @f64_olt(i32 addrspace(1)* %out, double %a, double %b) #0 {
 entry:
-  %0 = fcmp olt double %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = fcmp olt double %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}f64_ole:
-; SI: v_cmp_le_f64
-define void @f64_ole(i32 addrspace(1)* %out, double %a, double %b) {
+; GCN-LABEL: {{^}}f64_ole:
+; GCN: v_cmp_le_f64
+define void @f64_ole(i32 addrspace(1)* %out, double %a, double %b) #0 {
 entry:
-  %0 = fcmp ole double %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = fcmp ole double %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}f64_one:
-; SI: v_cmp_lg_f64_e32 vcc
-; SI: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f64_one(i32 addrspace(1)* %out, double %a, double %b) {
+; GCN-LABEL: {{^}}f64_one:
+; GCN: v_cmp_lg_f64_e32 vcc
+; GCN: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
+define void @f64_one(i32 addrspace(1)* %out, double %a, double %b) #0 {
 entry:
-  %0 = fcmp one double %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = fcmp one double %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}f64_ord:
-; SI: v_cmp_o_f64
-define void @f64_ord(i32 addrspace(1)* %out, double %a, double %b) {
+; GCN-LABEL: {{^}}f64_ord:
+; GCN: v_cmp_o_f64
+define void @f64_ord(i32 addrspace(1)* %out, double %a, double %b) #0 {
 entry:
-  %0 = fcmp ord double %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = fcmp ord double %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}f64_ueq:
-; SI: v_cmp_nlg_f64_e32 vcc
-; SI: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f64_ueq(i32 addrspace(1)* %out, double %a, double %b) {
+; GCN-LABEL: {{^}}f64_ueq:
+; GCN: v_cmp_nlg_f64_e32 vcc
+; GCN: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
+define void @f64_ueq(i32 addrspace(1)* %out, double %a, double %b) #0 {
 entry:
-  %0 = fcmp ueq double %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = fcmp ueq double %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}f64_ugt:
+; GCN-LABEL: {{^}}f64_ugt:
 
-; SI: v_cmp_nle_f64_e32 vcc
-; SI: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f64_ugt(i32 addrspace(1)* %out, double %a, double %b) {
+; GCN: v_cmp_nle_f64_e32 vcc
+; GCN: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
+define void @f64_ugt(i32 addrspace(1)* %out, double %a, double %b) #0 {
 entry:
-  %0 = fcmp ugt double %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = fcmp ugt double %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}f64_uge:
-; SI: v_cmp_nlt_f64_e32 vcc
-; SI: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f64_uge(i32 addrspace(1)* %out, double %a, double %b) {
+; GCN-LABEL: {{^}}f64_uge:
+; GCN: v_cmp_nlt_f64_e32 vcc
+; GCN: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
+define void @f64_uge(i32 addrspace(1)* %out, double %a, double %b) #0 {
 entry:
-  %0 = fcmp uge double %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = fcmp uge double %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}f64_ult:
-; SI: v_cmp_nge_f64_e32 vcc
-; SI: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f64_ult(i32 addrspace(1)* %out, double %a, double %b) {
+; GCN-LABEL: {{^}}f64_ult:
+; GCN: v_cmp_nge_f64_e32 vcc
+; GCN: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
+define void @f64_ult(i32 addrspace(1)* %out, double %a, double %b) #0 {
 entry:
-  %0 = fcmp ult double %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = fcmp ult double %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}f64_ule:
-; SI: v_cmp_ngt_f64_e32 vcc
-; SI: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
-define void @f64_ule(i32 addrspace(1)* %out, double %a, double %b) {
+; GCN-LABEL: {{^}}f64_ule:
+; GCN: v_cmp_ngt_f64_e32 vcc
+; GCN: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc
+define void @f64_ule(i32 addrspace(1)* %out, double %a, double %b) #0 {
 entry:
-  %0 = fcmp ule double %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = fcmp ule double %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}f64_une:
-; SI: v_cmp_neq_f64
-define void @f64_une(i32 addrspace(1)* %out, double %a, double %b) {
+; GCN-LABEL: {{^}}f64_une:
+; GCN: v_cmp_neq_f64
+define void @f64_une(i32 addrspace(1)* %out, double %a, double %b) #0 {
 entry:
-  %0 = fcmp une double %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = fcmp une double %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}f64_uno:
-; SI: v_cmp_u_f64
-define void @f64_uno(i32 addrspace(1)* %out, double %a, double %b) {
+; GCN-LABEL: {{^}}f64_uno:
+; GCN: v_cmp_u_f64
+define void @f64_uno(i32 addrspace(1)* %out, double %a, double %b) #0 {
 entry:
-  %0 = fcmp uno double %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = fcmp uno double %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
@@ -158,102 +158,104 @@ entry:
 ;; 64-bit integer comparisons
 ;;;==========================================================================;;;
 
-; FUNC-LABEL: {{^}}i64_eq:
-; SI: v_cmp_eq_u64
-define void @i64_eq(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+; GCN-LABEL: {{^}}i64_eq:
+; GCN: v_cmp_eq_u64
+define void @i64_eq(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
 entry:
-  %0 = icmp eq i64 %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = icmp eq i64 %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}i64_ne:
-; SI: v_cmp_ne_u64
-define void @i64_ne(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+; GCN-LABEL: {{^}}i64_ne:
+; GCN: v_cmp_ne_u64
+define void @i64_ne(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
 entry:
-  %0 = icmp ne i64 %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = icmp ne i64 %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}i64_ugt:
-; SI: v_cmp_gt_u64
-define void @i64_ugt(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+; GCN-LABEL: {{^}}i64_ugt:
+; GCN: v_cmp_gt_u64
+define void @i64_ugt(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
 entry:
-  %0 = icmp ugt i64 %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = icmp ugt i64 %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}i64_uge:
-; SI: v_cmp_ge_u64
-define void @i64_uge(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+; GCN-LABEL: {{^}}i64_uge:
+; GCN: v_cmp_ge_u64
+define void @i64_uge(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
 entry:
-  %0 = icmp uge i64 %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = icmp uge i64 %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}i64_ult:
-; SI: v_cmp_lt_u64
-define void @i64_ult(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+; GCN-LABEL: {{^}}i64_ult:
+; GCN: v_cmp_lt_u64
+define void @i64_ult(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
 entry:
-  %0 = icmp ult i64 %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = icmp ult i64 %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}i64_ule:
-; SI: v_cmp_le_u64
-define void @i64_ule(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+; GCN-LABEL: {{^}}i64_ule:
+; GCN: v_cmp_le_u64
+define void @i64_ule(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
 entry:
-  %0 = icmp ule i64 %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = icmp ule i64 %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}i64_sgt:
-; SI: v_cmp_gt_i64
-define void @i64_sgt(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+; GCN-LABEL: {{^}}i64_sgt:
+; GCN: v_cmp_gt_i64
+define void @i64_sgt(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
 entry:
-  %0 = icmp sgt i64 %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = icmp sgt i64 %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}i64_sge:
-; SI: v_cmp_ge_i64
-define void @i64_sge(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+; GCN-LABEL: {{^}}i64_sge:
+; GCN: v_cmp_ge_i64
+define void @i64_sge(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
 entry:
-  %0 = icmp sge i64 %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = icmp sge i64 %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}i64_slt:
-; SI: v_cmp_lt_i64
-define void @i64_slt(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+; GCN-LABEL: {{^}}i64_slt:
+; GCN: v_cmp_lt_i64
+define void @i64_slt(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
 entry:
-  %0 = icmp slt i64 %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = icmp slt i64 %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
 
-; FUNC-LABEL: {{^}}i64_sle:
-; SI: v_cmp_le_i64
-define void @i64_sle(i32 addrspace(1)* %out, i64 %a, i64 %b) {
+; GCN-LABEL: {{^}}i64_sle:
+; GCN: v_cmp_le_i64
+define void @i64_sle(i32 addrspace(1)* %out, i64 %a, i64 %b) #0 {
 entry:
-  %0 = icmp sle i64 %a, %b
-  %1 = sext i1 %0 to i32
-  store i32 %1, i32 addrspace(1)* %out
+  %tmp0 = icmp sle i64 %a, %b
+  %tmp1 = sext i1 %tmp0 to i32
+  store i32 %tmp1, i32 addrspace(1)* %out
   ret void
 }
+
+attributes #0 = { nounwind }



