[llvm] r232085 - [X86] Fix a regression introduced by r223641.

Hans Wennborg hans at chromium.org
Thu Mar 12 14:57:54 PDT 2015


+Tom Stellard who owns 3.6.1 (3.6 was released a while ago :-)

On Thu, Mar 12, 2015 at 2:50 PM, Quentin Colombet <qcolombet at apple.com> wrote:
> Hi Hans,
>
> can we get this into 3.6?
>
> +Nadav for approval.
>
> Thanks,
> -Quentin
>> On Mar 12, 2015, at 12:34 PM, Quentin Colombet <qcolombet at apple.com> wrote:
>>
>> Author: qcolombet
>> Date: Thu Mar 12 14:34:12 2015
>> New Revision: 232085
>>
>> URL: http://llvm.org/viewvc/llvm-project?rev=232085&view=rev
>> Log:
>> [X86] Fix a regression introduced by r223641.
>> The permps and permd instructions have their operands swapped compared to the
>> intrinsic definition. Therefore, they do not fall into the INTR_TYPE_2OP
>> category.
>>
>> I did not create a new category for those two, as they are the only ones AFAICT
>> in that case.
>>
>> <rdar://problem/20108262>
>>
>> Modified:
>>    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
>>    llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h
>>    llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86.ll
>>    llvm/trunk/test/CodeGen/X86/stack-folding-int-avx2.ll
>>
>> Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=232085&r1=232084&r2=232085&view=diff
>> ==============================================================================
>> --- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
>> +++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Thu Mar 12 14:34:12 2015
>> @@ -14711,6 +14711,13 @@ static SDValue LowerINTRINSIC_WO_CHAIN(S
>>   switch (IntNo) {
>>   default: return SDValue();    // Don't custom lower most intrinsics.
>>
>> +  case Intrinsic::x86_avx2_permd:
>> +  case Intrinsic::x86_avx2_permps:
>> +    // Operands intentionally swapped. Mask is last operand to intrinsic,
>> +    // but second operand for node/instruction.
>> +    return DAG.getNode(X86ISD::VPERMV, dl, Op.getValueType(),
>> +                       Op.getOperand(2), Op.getOperand(1));
>> +
>>   case Intrinsic::x86_avx512_mask_valign_q_512:
>>   case Intrinsic::x86_avx512_mask_valign_d_512:
>>     // Vector source operands are swapped.
>>
>> Modified: llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h?rev=232085&r1=232084&r2=232085&view=diff
>> ==============================================================================
>> --- llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h (original)
>> +++ llvm/trunk/lib/Target/X86/X86IntrinsicsInfo.h Thu Mar 12 14:34:12 2015
>> @@ -175,8 +175,6 @@ static const IntrinsicData  IntrinsicsWi
>>   X86_INTRINSIC_DATA(avx2_packsswb,     INTR_TYPE_2OP, X86ISD::PACKSS, 0),
>>   X86_INTRINSIC_DATA(avx2_packusdw,     INTR_TYPE_2OP, X86ISD::PACKUS, 0),
>>   X86_INTRINSIC_DATA(avx2_packuswb,     INTR_TYPE_2OP, X86ISD::PACKUS, 0),
>> -  X86_INTRINSIC_DATA(avx2_permd,        INTR_TYPE_2OP, X86ISD::VPERMV, 0),
>> -  X86_INTRINSIC_DATA(avx2_permps,       INTR_TYPE_2OP, X86ISD::VPERMV, 0),
>>   X86_INTRINSIC_DATA(avx2_phadd_d,      INTR_TYPE_2OP, X86ISD::HADD, 0),
>>   X86_INTRINSIC_DATA(avx2_phadd_w,      INTR_TYPE_2OP, X86ISD::HADD, 0),
>>   X86_INTRINSIC_DATA(avx2_phsub_d,      INTR_TYPE_2OP, X86ISD::HSUB, 0),
>>
>> Modified: llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86.ll
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86.ll?rev=232085&r1=232084&r2=232085&view=diff
>> ==============================================================================
>> --- llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86.ll (original)
>> +++ llvm/trunk/test/CodeGen/X86/avx2-intrinsics-x86.ll Thu Mar 12 14:34:12 2015
>> @@ -746,7 +746,10 @@ declare <4 x i64> @llvm.x86.avx2.pbroadc
>>
>>
>> define <8 x i32> @test_x86_avx2_permd(<8 x i32> %a0, <8 x i32> %a1) {
>> -  ; CHECK: vpermd
>> +  ; Check that the arguments are swapped between the intrinsic definition
>> +  ; and its lowering. Indeed, the offsets are the first source in
>> +  ; the instruction.
>> +  ; CHECK: vpermd %ymm0, %ymm1, %ymm0
>>   %res = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a0, <8 x i32> %a1) ; <<8 x i32>> [#uses=1]
>>   ret <8 x i32> %res
>> }
>> @@ -754,7 +757,10 @@ declare <8 x i32> @llvm.x86.avx2.permd(<
>>
>>
>> define <8 x float> @test_x86_avx2_permps(<8 x float> %a0, <8 x float> %a1) {
>> -  ; CHECK: vpermps
>> +  ; Check that the arguments are swapped between the intrinsic definition
>> +  ; and its lowering. Indeed, the offsets are the first source in
>> +  ; the instruction.
>> +  ; CHECK: vpermps %ymm0, %ymm1, %ymm0
>>   %res = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x float> %a1) ; <<8 x float>> [#uses=1]
>>   ret <8 x float> %res
>> }
>>
>> Modified: llvm/trunk/test/CodeGen/X86/stack-folding-int-avx2.ll
>> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/stack-folding-int-avx2.ll?rev=232085&r1=232084&r2=232085&view=diff
>> ==============================================================================
>> --- llvm/trunk/test/CodeGen/X86/stack-folding-int-avx2.ll (original)
>> +++ llvm/trunk/test/CodeGen/X86/stack-folding-int-avx2.ll Thu Mar 12 14:34:12 2015
>> @@ -442,7 +442,7 @@ define <8 x i32> @stack_fold_permd(<8 x
>>   ;CHECK-LABEL: stack_fold_permd
>>   ;CHECK:   vpermd {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
>>   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>> -  %2 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a0, <8 x i32> %a1)
>> +  %2 = call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %a1, <8 x i32> %a0)
>>   ret <8 x i32> %2
>> }
>> declare <8 x i32> @llvm.x86.avx2.permd(<8 x i32>, <8 x i32>) nounwind readonly
>> @@ -461,7 +461,7 @@ define <8 x float> @stack_fold_permps(<8
>>   ;CHECK-LABEL: stack_fold_permps
>>   ;CHECK:       vpermps {{-?[0-9]*}}(%rsp), {{%ymm[0-9][0-9]*}}, {{%ymm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
>>   %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{flags}"()
>> -  %2 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a0, <8 x float> %a1)
>> +  %2 = call <8 x float> @llvm.x86.avx2.permps(<8 x float> %a1, <8 x float> %a0)
>>   ret <8 x float> %2
>> }
>> declare <8 x float> @llvm.x86.avx2.permps(<8 x float>, <8 x float>) nounwind readonly
>>
>>
>> _______________________________________________
>> llvm-commits mailing list
>> llvm-commits at cs.uiuc.edu
>> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
>



More information about the llvm-commits mailing list