[PATCH] R600/SI: Try to keep mul on SALU

Tom Stellard tom at stellard.net
Wed Sep 3 13:00:21 PDT 2014


On Wed, Sep 03, 2014 at 03:32:14PM -0400, Matt Arsenault wrote:
> 
> On Sep 3, 2014, at 2:53 PM, Tom Stellard <tom at stellard.net> wrote:
> 
> > On Sat, Aug 30, 2014 at 10:35:23PM +0000, Matt Arsenault wrote:
> >> Also fix a bug this exposed: when legalizing an immediate
> >> operand, a v_mov_b32 would be created with a VSrc dest register.
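> >> 
> >> For illustration, the immediate-legalization path gets exercised by
> >> cases like the new v_mul64_sext_inline_imm test added below, where an
> >> SI inline immediate ends up as a direct operand of the VALU multiplies:
> >> 
> >>   define void @v_mul64_sext_inline_imm(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
> >>     %val = load i32 addrspace(1)* %in, align 4
> >>     %ext = sext i32 %val to i64
> >>     ; 9 is an SI inline immediate; the CHECK lines in the test below
> >>     ; expect it as a direct operand of V_MUL_LO_I32 / V_MUL_HI_I32.
> >>     %mul = mul i64 %ext, 9
> >>     store i64 %mul, i64 addrspace(1)* %out, align 8
> >>     ret void
> >>   }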
> >> 
> >> http://reviews.llvm.org/D5134
> >> 
> >> Files:
> >>  lib/Target/R600/SIInstrInfo.cpp
> >>  lib/Target/R600/SIInstructions.td
> >>  test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
> >>  test/CodeGen/R600/mul.ll
> >>  test/CodeGen/R600/sign_extend.ll
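> >> 
> >> For reference, the headline case this enables looks like the new
> >> s_mul_i32 test below: both multiply operands are kernel arguments
> >> (SGPR values), so the mul should now select to S_MUL_I32 instead of
> >> V_MUL_LO_I32:
> >> 
> >>   ; Both %a and %b are uniform kernel arguments, so the multiply can
> >>   ; stay on the SALU and only the store needs a VGPR copy.
> >>   define void @s_mul_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
> >>     %mul = mul i32 %a, %b
> >>     store i32 %mul, i32 addrspace(1)* %out, align 4
> >>     ret void
> >>   }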
> > 
> >> Index: lib/Target/R600/SIInstrInfo.cpp
> >> ===================================================================
> >> --- lib/Target/R600/SIInstrInfo.cpp
> >> +++ lib/Target/R600/SIInstrInfo.cpp
> >> @@ -908,6 +908,7 @@
> >>   case AMDGPU::S_ADDC_U32: return AMDGPU::V_ADDC_U32_e32;
> >>   case AMDGPU::S_SUB_I32: return AMDGPU::V_SUB_I32_e32;
> >>   case AMDGPU::S_SUBB_U32: return AMDGPU::V_SUBB_U32_e32;
> >> +  case AMDGPU::S_MUL_I32: return AMDGPU::V_MUL_LO_I32;
> >>   case AMDGPU::S_AND_B32: return AMDGPU::V_AND_B32_e32;
> >>   case AMDGPU::S_OR_B32: return AMDGPU::V_OR_B32_e32;
> >>   case AMDGPU::S_XOR_B32: return AMDGPU::V_XOR_B32_e32;
> >> @@ -981,10 +982,14 @@
> >>   unsigned RCID = get(MI->getOpcode()).OpInfo[OpIdx].RegClass;
> >>   const TargetRegisterClass *RC = RI.getRegClass(RCID);
> >>   unsigned Opcode = AMDGPU::V_MOV_B32_e32;
> >> +
> >>   if (MO.isReg()) {
> >>     Opcode = AMDGPU::COPY;
> >>   } else if (RI.isSGPRClass(RC)) {
> >>     Opcode = AMDGPU::S_MOV_B32;
> >> +  } else if (MO.isImm()) {
> >> +    if (RC == &AMDGPU::VSrc_32RegClass)
> >> +      Opcode = AMDGPU::S_MOV_B32;
> >>   }
> > 
> > I think I have a similar fix in a local series I'm working on.
> > 
> >> 
> >>   const TargetRegisterClass *VRC = RI.getEquivalentVGPRClass(RC);
> >> Index: lib/Target/R600/SIInstructions.td
> >> ===================================================================
> >> --- lib/Target/R600/SIInstructions.td
> >> +++ lib/Target/R600/SIInstructions.td
> >> @@ -274,11 +274,15 @@
> >>   [(set i64:$dst, (sra i64:$src0, i32:$src1))]
> >> >;
> >> 
> >> -} // End AddedComplexity = 1
> >> 
> >> def S_BFM_B32 : SOP2_32 <0x00000024, "S_BFM_B32", []>;
> >> def S_BFM_B64 : SOP2_64 <0x00000025, "S_BFM_B64", []>;
> >> -def S_MUL_I32 : SOP2_32 <0x00000026, "S_MUL_I32", []>;
> >> +def S_MUL_I32 : SOP2_32 <0x00000026, "S_MUL_I32",
> >> +  [(set i32:$dst, (mul i32:$src0, i32:$src1))]
> >> +>;
> >> +
> >> +} // End AddedComplexity = 1
> >> +
> >> def S_BFE_U32 : SOP2_32 <0x00000027, "S_BFE_U32", []>;
> >> def S_BFE_I32 : SOP2_32 <0x00000028, "S_BFE_I32", []>;
> >> def S_BFE_U64 : SOP2_64 <0x00000029, "S_BFE_U64", []>;
> >> @@ -2499,11 +2503,6 @@
> >> def : UMad24Pat<V_MAD_U32_U24>;
> >> 
> >> def : Pat <
> >> -  (mul i32:$src0, i32:$src1),
> >> -  (V_MUL_LO_I32 $src0, $src1)
> >> ->;
> >> -
> >> -def : Pat <
> >>   (mulhu i32:$src0, i32:$src1),
> >>   (V_MUL_HI_U32 $src0, $src1)
> >> >;
> >> @@ -2513,6 +2512,11 @@
> >>   (V_MUL_HI_I32 $src0, $src1)
> >> >;
> >> 
> >> +def : Pat <
> >> +  (mul i32:$src0, i32:$src1),
> >> +  (V_MUL_LO_I32 $src0, $src1)
> >> +>;
> >> +
> > 
> > Did you mean to move this pattern?
> > 
> 
> Not really. I deleted it at first and then re-added it to implement the branch workaround. I’ll remove this.
> 

OK, LGTM.

> 
> > 
> >> def : Vop3ModPat<V_MAD_F32, VOP_F32_F32_F32_F32, AMDGPUmad>;
> >> 
> >> 
> >> Index: test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
> >> ===================================================================
> >> --- test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
> >> +++ test/CodeGen/R600/codegen-prepare-addrmode-sext.ll
> >> @@ -7,8 +7,8 @@
> >> ; FUNC-LABEL: @test
> >> ; OPT: mul nsw i32
> >> ; OPT-NEXT: sext
> >> -; SI-LLC: V_MUL_LO_I32
> >> -; SI-LLC-NOT: V_MUL_HI
> >> +; SI-LLC: S_MUL_I32
> >> +; SI-LLC-NOT: MUL
> >> define void @test(i8 addrspace(1)* nocapture readonly %in, i32 %a, i8 %b) {
> >> entry:
> >>   %0 = mul nsw i32 %a, 3
> >> Index: test/CodeGen/R600/mul.ll
> >> ===================================================================
> >> --- test/CodeGen/R600/mul.ll
> >> +++ test/CodeGen/R600/mul.ll
> >> @@ -3,14 +3,14 @@
> >> 
> >> ; mul24 and mad24 are affected
> >> 
> >> -; FUNC-LABEL: @test2
> >> +; FUNC-LABEL: @test_mul_v2i32
> >> ; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
> >> ; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
> >> 
> >> ; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> >> ; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> >> 
> >> -define void @test2(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
> >> +define void @test_mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
> >>   %b_ptr = getelementptr <2 x i32> addrspace(1)* %in, i32 1
> >>   %a = load <2 x i32> addrspace(1) * %in
> >>   %b = load <2 x i32> addrspace(1) * %b_ptr
> >> @@ -19,7 +19,7 @@
> >>   ret void
> >> }
> >> 
> >> -; FUNC-LABEL: @test4
> >> +; FUNC-LABEL: @v_mul_v4i32
> >> ; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
> >> ; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
> >> ; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
> >> @@ -30,7 +30,7 @@
> >> ; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> >> ; SI: V_MUL_LO_I32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
> >> 
> >> -define void @test4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
> >> +define void @v_mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
> >>   %b_ptr = getelementptr <4 x i32> addrspace(1)* %in, i32 1
> >>   %a = load <4 x i32> addrspace(1) * %in
> >>   %b = load <4 x i32> addrspace(1) * %b_ptr
> >> @@ -39,12 +39,26 @@
> >>   ret void
> >> }
> >> 
> >> -; FUNC-LABEL: @trunc_i64_mul_to_i32
> >> +; FUNC-LABEL: @s_trunc_i64_mul_to_i32
> >> +; SI: S_LOAD_DWORD
> >> +; SI: S_LOAD_DWORD
> >> +; SI: S_MUL_I32
> >> +; SI: BUFFER_STORE_DWORD
> >> +define void @s_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
> >> +  %mul = mul i64 %b, %a
> >> +  %trunc = trunc i64 %mul to i32
> >> +  store i32 %trunc, i32 addrspace(1)* %out, align 8
> >> +  ret void
> >> +}
> >> +
> >> +; FUNC-LABEL: @v_trunc_i64_mul_to_i32
> >> ; SI: S_LOAD_DWORD
> >> ; SI: S_LOAD_DWORD
> >> ; SI: V_MUL_LO_I32
> >> ; SI: BUFFER_STORE_DWORD
> >> -define void @trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
> >> +define void @v_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
> >> +  %a = load i64 addrspace(1)* %aptr, align 8
> >> +  %b = load i64 addrspace(1)* %bptr, align 8
> >>   %mul = mul i64 %b, %a
> >>   %trunc = trunc i64 %mul to i32
> >>   store i32 %trunc, i32 addrspace(1)* %out, align 8
> >> @@ -56,7 +70,7 @@
> >> ; FUNC-LABEL: @mul64_sext_c
> >> ; EG-DAG: MULLO_INT
> >> ; EG-DAG: MULHI_INT
> >> -; SI-DAG: V_MUL_LO_I32
> >> +; SI-DAG: S_MUL_I32
> >> ; SI-DAG: V_MUL_HI_I32
> >> define void @mul64_sext_c(i64 addrspace(1)* %out, i32 %in) {
> >> entry:
> >> @@ -66,16 +80,120 @@
> >>   ret void
> >> }
> >> 
> >> +; FUNC-LABEL: @v_mul64_sext_c:
> >> +; EG-DAG: MULLO_INT
> >> +; EG-DAG: MULHI_INT
> >> +; SI-DAG: V_MUL_LO_I32
> >> +; SI-DAG: V_MUL_HI_I32
> >> +; SI: S_ENDPGM
> >> +define void @v_mul64_sext_c(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
> >> +  %val = load i32 addrspace(1)* %in, align 4
> >> +  %ext = sext i32 %val to i64
> >> +  %mul = mul i64 %ext, 80
> >> +  store i64 %mul, i64 addrspace(1)* %out, align 8
> >> +  ret void
> >> +}
> >> +
> >> +; FUNC-LABEL: @v_mul64_sext_inline_imm:
> >> +; SI-DAG: V_MUL_LO_I32 v{{[0-9]+}}, 9, v{{[0-9]+}}
> >> +; SI-DAG: V_MUL_HI_I32 v{{[0-9]+}}, 9, v{{[0-9]+}}
> >> +; SI: S_ENDPGM
> >> +define void @v_mul64_sext_inline_imm(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
> >> +  %val = load i32 addrspace(1)* %in, align 4
> >> +  %ext = sext i32 %val to i64
> >> +  %mul = mul i64 %ext, 9
> >> +  store i64 %mul, i64 addrspace(1)* %out, align 8
> >> +  ret void
> >> +}
> >> +
> >> +; FUNC-LABEL: @s_mul_i32:
> >> +; SI: S_LOAD_DWORD [[SRC0:s[0-9]+]],
> >> +; SI: S_LOAD_DWORD [[SRC1:s[0-9]+]],
> >> +; SI: S_MUL_I32 [[SRESULT:s[0-9]+]], [[SRC0]], [[SRC1]]
> >> +; SI: V_MOV_B32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
> >> +; SI: BUFFER_STORE_DWORD [[VRESULT]],
> >> +; SI: S_ENDPGM
> >> +define void @s_mul_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
> >> +  %mul = mul i32 %a, %b
> >> +  store i32 %mul, i32 addrspace(1)* %out, align 4
> >> +  ret void
> >> +}
> >> +
> >> +; FUNC-LABEL: @v_mul_i32
> >> +; SI: V_MUL_LO_I32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
> >> +define void @v_mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
> >> +  %b_ptr = getelementptr i32 addrspace(1)* %in, i32 1
> >> +  %a = load i32 addrspace(1)* %in
> >> +  %b = load i32 addrspace(1)* %b_ptr
> >> +  %result = mul i32 %a, %b
> >> +  store i32 %result, i32 addrspace(1)* %out
> >> +  ret void
> >> +}
> >> +
> >> ; A standard 64-bit multiply.  The expansion should be around 6 instructions.
> >> ; It would be difficult to match the expansion correctly without writing
> >> ; a really complicated list of FileCheck expressions.  I don't want
> >> ; to confuse people who may 'break' this test with a correct optimization,
> >> ; so this test just uses FUNC-LABEL to make sure the compiler does not
> >> ; crash with a 'failed to select' error.
> >> -; FUNC-LABEL: @mul64
> >> -define void @mul64(i64 addrspace(1)* %out, i64 %a, i64 %b) {
> >> +
> >> +; FUNC-LABEL: @s_mul_i64:
> >> +define void @s_mul_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
> >> +  %mul = mul i64 %a, %b
> >> +  store i64 %mul, i64 addrspace(1)* %out, align 8
> >> +  ret void
> >> +}
> >> +
> >> +; FUNC-LABEL: @v_mul_i64
> >> +; SI: V_MUL_LO_I32
> >> +define void @v_mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
> >> +  %a = load i64 addrspace(1)* %aptr, align 8
> >> +  %b = load i64 addrspace(1)* %bptr, align 8
> >> +  %mul = mul i64 %a, %b
> >> +  store i64 %mul, i64 addrspace(1)* %out, align 8
> >> +  ret void
> >> +}
> >> +
> >> +; FUNC-LABEL: @mul32_in_branch
> >> +; SI: V_MUL_LO_I32
> >> +define void @mul32_in_branch(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a, i32 %b, i32 %c) {
> >> entry:
> >> -  %0 = mul i64 %a, %b
> >> -  store i64 %0, i64 addrspace(1)* %out
> >> +  %0 = icmp eq i32 %a, 0
> >> +  br i1 %0, label %if, label %else
> >> +
> >> +if:
> >> +  %1 = load i32 addrspace(1)* %in
> >> +  br label %endif
> >> +
> >> +else:
> >> +  %2 = mul i32 %a, %b
> >> +  br label %endif
> >> +
> >> +endif:
> >> +  %3 = phi i32 [%1, %if], [%2, %else]
> >> +  store i32 %3, i32 addrspace(1)* %out
> >> +  ret void
> >> +}
> >> +
> >> +; FUNC-LABEL: @mul64_in_branch
> >> +; SI-DAG: V_MUL_LO_I32
> >> +; SI-DAG: V_MUL_HI_U32
> >> +; SI: S_ENDPGM
> >> +define void @mul64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
> >> +entry:
> >> +  %0 = icmp eq i64 %a, 0
> >> +  br i1 %0, label %if, label %else
> >> +
> >> +if:
> >> +  %1 = load i64 addrspace(1)* %in
> >> +  br label %endif
> >> +
> >> +else:
> >> +  %2 = mul i64 %a, %b
> >> +  br label %endif
> >> +
> >> +endif:
> >> +  %3 = phi i64 [%1, %if], [%2, %else]
> >> +  store i64 %3, i64 addrspace(1)* %out
> >>   ret void
> >> }
> >> Index: test/CodeGen/R600/sign_extend.ll
> >> ===================================================================
> >> --- test/CodeGen/R600/sign_extend.ll
> >> +++ test/CodeGen/R600/sign_extend.ll
> >> @@ -10,10 +10,10 @@
> >>   ret void
> >> }
> >> 
> >> -; SI-LABEL: @test:
> >> -; SI: V_ASHR
> >> +; SI-LABEL: @test_s_sext_i32_to_i64:
> >> +; SI: S_ASHR_I32
> >> ; SI: S_ENDPG
> >> -define void @test(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) nounwind {
> >> +define void @test_s_sext_i32_to_i64(i64 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) nounwind {
> >> entry:
> >>   %mul = mul i32 %a, %b
> >>   %add = add i32 %mul, %c
> > 