[llvm] Add inreg bit convert tests (PR #136112)
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Mon May 5 12:07:05 PDT 2025
================
@@ -0,0 +1,582 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+
+; RUN: llc -mtriple=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefix=SI %s
+; RUN: llc -mtriple=amdgcn -mcpu=tonga < %s | FileCheck -check-prefix=VI %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx900 < %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -mtriple=amdgcn -mcpu=gfx1100 < %s | FileCheck -check-prefix=GFX11 %s
+
+define inreg half @s_bitcast_i16_to_f16_inreg(i16 inreg %a, i32 inreg %b) {
+; SI-LABEL: s_bitcast_i16_to_f16_inreg:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: s_and_b32 s6, s16, 0xffff
+; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_cbranch_scc0 .LBB0_4
+; SI-NEXT: ; %bb.1: ; %cmp.false
+; SI-NEXT: v_cvt_f32_f16_e32 v0, s6
+; SI-NEXT: s_cbranch_execnz .LBB0_3
+; SI-NEXT: .LBB0_2: ; %cmp.true
+; SI-NEXT: s_add_i32 s6, s6, 3
+; SI-NEXT: v_cvt_f32_f16_e32 v0, s6
+; SI-NEXT: .LBB0_3: ; %end
+; SI-NEXT: s_setpc_b64 s[30:31]
+; SI-NEXT: .LBB0_4:
+; SI-NEXT: ; implicit-def: $vgpr0
+; SI-NEXT: s_branch .LBB0_2
+;
+; VI-LABEL: s_bitcast_i16_to_f16_inreg:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: s_cmp_lg_u32 s17, 0
+; VI-NEXT: s_cbranch_scc0 .LBB0_4
+; VI-NEXT: ; %bb.1: ; %cmp.false
+; VI-NEXT: s_cbranch_execnz .LBB0_3
+; VI-NEXT: .LBB0_2: ; %cmp.true
+; VI-NEXT: s_add_i32 s16, s16, 3
+; VI-NEXT: .LBB0_3: ; %end
+; VI-NEXT: v_mov_b32_e32 v0, s16
+; VI-NEXT: s_setpc_b64 s[30:31]
+; VI-NEXT: .LBB0_4:
+; VI-NEXT: s_branch .LBB0_2
+;
+; GFX9-LABEL: s_bitcast_i16_to_f16_inreg:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_cmp_lg_u32 s17, 0
+; GFX9-NEXT: s_cbranch_scc0 .LBB0_4
+; GFX9-NEXT: ; %bb.1: ; %cmp.false
+; GFX9-NEXT: s_cbranch_execnz .LBB0_3
+; GFX9-NEXT: .LBB0_2: ; %cmp.true
+; GFX9-NEXT: s_add_i32 s16, s16, 3
+; GFX9-NEXT: .LBB0_3: ; %end
+; GFX9-NEXT: v_mov_b32_e32 v0, s16
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+; GFX9-NEXT: .LBB0_4:
+; GFX9-NEXT: s_branch .LBB0_2
+;
+; GFX11-LABEL: s_bitcast_i16_to_f16_inreg:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, 0
+; GFX11-NEXT: s_cbranch_scc0 .LBB0_3
+; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-NEXT: s_cbranch_vccz .LBB0_4
+; GFX11-NEXT: ; %bb.2: ; %end
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-NEXT: .LBB0_3:
+; GFX11-NEXT: .LBB0_4: ; %cmp.true
+; GFX11-NEXT: s_add_i32 s0, s0, 3
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = icmp eq i32 %b, 0
+ br i1 %cmp, label %cmp.true, label %cmp.false
+
+cmp.true:
+ %a1 = add i16 %a, 3
+ %a2 = bitcast i16 %a1 to half
+ br label %end
+
+cmp.false:
+ %a3 = bitcast i16 %a to half
+ br label %end
+
+end:
+ %phi = phi half [ %a2, %cmp.true ], [ %a3, %cmp.false ]
+ ret half %phi
+}
+
+define inreg i16 @s_bitcast_f16_to_i16_inreg(half inreg %a, i32 inreg %b) {
+; SI-LABEL: s_bitcast_f16_to_i16_inreg:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, s16
+; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: ; %bb.1: ; %cmp.false
+; SI-NEXT: s_cbranch_execnz .LBB1_3
+; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: s_setpc_b64 s[30:31]
+; SI-NEXT: .LBB1_4:
+; SI-NEXT: s_branch .LBB1_2
+;
+; VI-LABEL: s_bitcast_f16_to_i16_inreg:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: s_cmp_lg_u32 s17, 0
+; VI-NEXT: s_cbranch_scc0 .LBB1_3
+; VI-NEXT: ; %bb.1: ; %cmp.false
+; VI-NEXT: s_cbranch_execnz .LBB1_4
+; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: v_mov_b32_e32 v0, 0x200
+; VI-NEXT: v_add_f16_e32 v0, s16, v0
+; VI-NEXT: s_setpc_b64 s[30:31]
+; VI-NEXT: .LBB1_3:
+; VI-NEXT: s_branch .LBB1_2
+; VI-NEXT: .LBB1_4:
+; VI-NEXT: v_mov_b32_e32 v0, s16
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_bitcast_f16_to_i16_inreg:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_cmp_lg_u32 s17, 0
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_3
+; GFX9-NEXT: ; %bb.1: ; %cmp.false
+; GFX9-NEXT: s_cbranch_execnz .LBB1_4
+; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
+; GFX9-NEXT: v_add_f16_e32 v0, s16, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+; GFX9-NEXT: .LBB1_3:
+; GFX9-NEXT: s_branch .LBB1_2
+; GFX9-NEXT: .LBB1_4:
+; GFX9-NEXT: v_mov_b32_e32 v0, s16
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_bitcast_f16_to_i16_inreg:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, 0
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_3
+; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-NEXT: v_add_f16_e64 v0, 0x200, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-NEXT: .LBB1_3:
+; GFX11-NEXT: s_branch .LBB1_2
+; GFX11-NEXT: .LBB1_4:
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = icmp eq i32 %b, 0
+ br i1 %cmp, label %cmp.true, label %cmp.false
+
+cmp.true:
+ %a1 = fadd half %a, 0xH0200
+ %a2 = bitcast half %a1 to i16
+ br label %end
+
+cmp.false:
+ %a3 = bitcast half %a to i16
+ br label %end
+
+end:
+ %phi = phi i16 [ %a2, %cmp.true ], [ %a3, %cmp.false ]
+ ret i16 %phi
+}
+
+define inreg bfloat @s_bitcast_i16_to_bf16_inreg(i16 inreg %a, i32 inreg %b) {
+; SI-LABEL: s_bitcast_i16_to_bf16_inreg:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: s_and_b32 s6, s16, 0xffff
+; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_cbranch_scc0 .LBB2_4
+; SI-NEXT: ; %bb.1: ; %cmp.false
+; SI-NEXT: s_lshl_b32 s7, s6, 16
+; SI-NEXT: s_cbranch_execnz .LBB2_3
+; SI-NEXT: .LBB2_2: ; %cmp.true
+; SI-NEXT: s_lshl_b32 s4, s6, 16
+; SI-NEXT: s_add_i32 s7, s4, 0x30000
+; SI-NEXT: .LBB2_3: ; %end
+; SI-NEXT: v_mov_b32_e32 v0, s7
+; SI-NEXT: s_setpc_b64 s[30:31]
+; SI-NEXT: .LBB2_4:
+; SI-NEXT: ; implicit-def: $sgpr7
+; SI-NEXT: s_branch .LBB2_2
+;
+; VI-LABEL: s_bitcast_i16_to_bf16_inreg:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: s_cmp_lg_u32 s17, 0
+; VI-NEXT: s_cbranch_scc0 .LBB2_4
+; VI-NEXT: ; %bb.1: ; %cmp.false
+; VI-NEXT: s_cbranch_execnz .LBB2_3
+; VI-NEXT: .LBB2_2: ; %cmp.true
+; VI-NEXT: s_add_i32 s16, s16, 3
+; VI-NEXT: .LBB2_3: ; %end
+; VI-NEXT: v_mov_b32_e32 v0, s16
+; VI-NEXT: s_setpc_b64 s[30:31]
+; VI-NEXT: .LBB2_4:
+; VI-NEXT: s_branch .LBB2_2
+;
+; GFX9-LABEL: s_bitcast_i16_to_bf16_inreg:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_cmp_lg_u32 s17, 0
+; GFX9-NEXT: s_cbranch_scc0 .LBB2_4
+; GFX9-NEXT: ; %bb.1: ; %cmp.false
+; GFX9-NEXT: s_cbranch_execnz .LBB2_3
+; GFX9-NEXT: .LBB2_2: ; %cmp.true
+; GFX9-NEXT: s_add_i32 s16, s16, 3
+; GFX9-NEXT: .LBB2_3: ; %end
+; GFX9-NEXT: v_mov_b32_e32 v0, s16
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+; GFX9-NEXT: .LBB2_4:
+; GFX9-NEXT: s_branch .LBB2_2
+;
+; GFX11-LABEL: s_bitcast_i16_to_bf16_inreg:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, 0
+; GFX11-NEXT: s_cbranch_scc0 .LBB2_3
+; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-NEXT: s_cbranch_vccz .LBB2_4
+; GFX11-NEXT: ; %bb.2: ; %end
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-NEXT: .LBB2_3:
+; GFX11-NEXT: .LBB2_4: ; %cmp.true
+; GFX11-NEXT: s_add_i32 s0, s0, 3
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = icmp eq i32 %b, 0
+ br i1 %cmp, label %cmp.true, label %cmp.false
+
+cmp.true:
+ %a1 = add i16 %a, 3
+ %a2 = bitcast i16 %a1 to bfloat
+ br label %end
+
+cmp.false:
+ %a3 = bitcast i16 %a to bfloat
+ br label %end
+
+end:
+ %phi = phi bfloat [ %a2, %cmp.true ], [ %a3, %cmp.false ]
+ ret bfloat %phi
+}
+
+define inreg i16 @s_bitcast_bf16_to_i16_inreg(bfloat inreg %a, i32 inreg %b) {
+; SI-LABEL: s_bitcast_bf16_to_i16_inreg:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: v_mul_f32_e64 v1, 1.0, s16
+; SI-NEXT: s_cbranch_scc0 .LBB3_4
+; SI-NEXT: ; %bb.1: ; %cmp.false
+; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v1
+; SI-NEXT: s_cbranch_execnz .LBB3_3
+; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v1
+; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
+; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; SI-NEXT: .LBB3_3: ; %end
+; SI-NEXT: s_setpc_b64 s[30:31]
+; SI-NEXT: .LBB3_4:
+; SI-NEXT: ; implicit-def: $vgpr0
+; SI-NEXT: s_branch .LBB3_2
+;
+; VI-LABEL: s_bitcast_bf16_to_i16_inreg:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: s_cmp_lg_u32 s17, 0
+; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: ; %bb.1: ; %cmp.false
+; VI-NEXT: s_cbranch_execnz .LBB3_4
+; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_lshl_b32 s4, s16, 16
+; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
+; VI-NEXT: v_add_f32_e32 v0, s4, v0
+; VI-NEXT: v_bfe_u32 v1, v0, 16, 1
+; VI-NEXT: v_add_u32_e32 v1, vcc, v1, v0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 0x7fff, v1
+; VI-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; VI-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; VI-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; VI-NEXT: s_setpc_b64 s[30:31]
+; VI-NEXT: .LBB3_3:
+; VI-NEXT: s_branch .LBB3_2
+; VI-NEXT: .LBB3_4:
+; VI-NEXT: v_mov_b32_e32 v0, s16
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_bitcast_bf16_to_i16_inreg:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_cmp_lg_u32 s17, 0
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: ; %bb.1: ; %cmp.false
+; GFX9-NEXT: s_cbranch_execnz .LBB3_4
+; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_lshl_b32 s4, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
+; GFX9-NEXT: v_add_f32_e32 v0, s4, v0
+; GFX9-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v0
+; GFX9-NEXT: v_add_u32_e32 v1, 0x7fff, v1
+; GFX9-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v0, v0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+; GFX9-NEXT: .LBB3_3:
+; GFX9-NEXT: s_branch .LBB3_2
+; GFX9-NEXT: .LBB3_4:
+; GFX9-NEXT: v_mov_b32_e32 v0, s16
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_bitcast_bf16_to_i16_inreg:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, 0
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
+; GFX11-NEXT: v_bfe_u32 v1, v0, 16, 1
+; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v0
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v0, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v0
+; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
+; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-NEXT: .LBB3_3:
+; GFX11-NEXT: s_branch .LBB3_2
+; GFX11-NEXT: .LBB3_4:
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = icmp eq i32 %b, 0
+ br i1 %cmp, label %cmp.true, label %cmp.false
+
+cmp.true:
+ %a1 = fadd bfloat %a, 0xR40C0
+ %a2 = bitcast bfloat %a1 to i16
+ br label %end
+
+cmp.false:
+ %a3 = bitcast bfloat %a to i16
+ br label %end
+
+end:
+ %phi = phi i16 [ %a2, %cmp.true ], [ %a3, %cmp.false ]
+ ret i16 %phi
+}
+
+define inreg bfloat @s_bitcast_f16_to_bf16_inreg(half inreg %a, i32 inreg %b) {
+; SI-LABEL: s_bitcast_f16_to_bf16_inreg:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v1, s16
+; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_cbranch_scc0 .LBB4_4
+; SI-NEXT: ; %bb.1: ; %cmp.false
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v1
+; SI-NEXT: s_cbranch_execnz .LBB4_3
+; SI-NEXT: .LBB4_2: ; %cmp.true
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v1
+; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT: .LBB4_3: ; %end
+; SI-NEXT: s_setpc_b64 s[30:31]
+; SI-NEXT: .LBB4_4:
+; SI-NEXT: ; implicit-def: $vgpr0
+; SI-NEXT: s_branch .LBB4_2
+;
+; VI-LABEL: s_bitcast_f16_to_bf16_inreg:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: s_cmp_lg_u32 s17, 0
+; VI-NEXT: s_cbranch_scc0 .LBB4_3
+; VI-NEXT: ; %bb.1: ; %cmp.false
+; VI-NEXT: s_cbranch_execnz .LBB4_4
+; VI-NEXT: .LBB4_2: ; %cmp.true
+; VI-NEXT: v_mov_b32_e32 v0, 0x200
+; VI-NEXT: v_add_f16_e32 v0, s16, v0
+; VI-NEXT: s_setpc_b64 s[30:31]
+; VI-NEXT: .LBB4_3:
+; VI-NEXT: s_branch .LBB4_2
+; VI-NEXT: .LBB4_4:
+; VI-NEXT: v_mov_b32_e32 v0, s16
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: s_bitcast_f16_to_bf16_inreg:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_cmp_lg_u32 s17, 0
+; GFX9-NEXT: s_cbranch_scc0 .LBB4_3
+; GFX9-NEXT: ; %bb.1: ; %cmp.false
+; GFX9-NEXT: s_cbranch_execnz .LBB4_4
+; GFX9-NEXT: .LBB4_2: ; %cmp.true
+; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
+; GFX9-NEXT: v_add_f16_e32 v0, s16, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+; GFX9-NEXT: .LBB4_3:
+; GFX9-NEXT: s_branch .LBB4_2
+; GFX9-NEXT: .LBB4_4:
+; GFX9-NEXT: v_mov_b32_e32 v0, s16
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX11-LABEL: s_bitcast_f16_to_bf16_inreg:
+; GFX11: ; %bb.0:
+; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, 0
+; GFX11-NEXT: s_cbranch_scc0 .LBB4_3
+; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX11-NEXT: s_cbranch_vccnz .LBB4_4
+; GFX11-NEXT: .LBB4_2: ; %cmp.true
+; GFX11-NEXT: v_add_f16_e64 v0, 0x200, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-NEXT: .LBB4_3:
+; GFX11-NEXT: s_branch .LBB4_2
+; GFX11-NEXT: .LBB4_4:
+; GFX11-NEXT: v_mov_b32_e32 v0, s0
+; GFX11-NEXT: s_setpc_b64 s[30:31]
+ %cmp = icmp eq i32 %b, 0
+ br i1 %cmp, label %cmp.true, label %cmp.false
+
+cmp.true:
+ %a1 = fadd half %a, 0xH0200
+ %a2 = bitcast half %a1 to bfloat
+ br label %end
+
+cmp.false:
+ %a3 = bitcast half %a to bfloat
+ br label %end
+
+end:
+ %phi = phi bfloat [ %a2, %cmp.true ], [ %a3, %cmp.false ]
+ ret bfloat %phi
+}
+
+define inreg half @s_bitcast_bf16_to_f16_inreg(bfloat inreg %a, i32 inreg %b) {
----------------
arsenm wrote:
Don't need to include inreg in the function names; inreg is an implementation detail.
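
For example, the first test would then look like this (a hypothetical sketch of the suggested rename, not part of the patch: the body is unchanged, only the "_inreg" suffix is dropped from the symbol name, and the CHECK lines would be regenerated with update_llc_test_checks.py):

; hypothetical name without the "_inreg" suffix; the inreg attributes stay on the signature
define inreg half @s_bitcast_i16_to_f16(i16 inreg %a, i32 inreg %b) {
  %cmp = icmp eq i32 %b, 0
  br i1 %cmp, label %cmp.true, label %cmp.false

cmp.true:
  %a1 = add i16 %a, 3
  %a2 = bitcast i16 %a1 to half
  br label %end

cmp.false:
  %a3 = bitcast i16 %a to half
  br label %end

end:
  %phi = phi half [ %a2, %cmp.true ], [ %a3, %cmp.false ]
  ret half %phi
}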
https://github.com/llvm/llvm-project/pull/136112