[PATCH][AVX512] Add 512b integer shift by variable patterns and intrinsics

Cameron McInally cameron.mcinally at nyu.edu
Tue Nov 25 08:25:53 PST 2014


On Thu, Nov 20, 2014 at 2:01 AM, Demikhovsky, Elena
<elena.demikhovsky at intel.com> wrote:
> I think that you should organize template classes in a form convenient for adding VLI.
> I agree that the instructions themselves may be added later.
>
> -  Elena
>
>
> -----Original Message-----
> From: Cameron McInally [mailto:cameron.mcinally at nyu.edu]
> Sent: Wednesday, November 19, 2014 15:58
> To: Demikhovsky, Elena
> Cc: llvm-commits at cs.uiuc.edu; Robert Khasanov
> Subject: Re: [PATCH][AVX512] Add 512b integer shift by variable patterns and intrinsics
>
> Hi Elena,
>
> I've attached an updated patch that adds the "_mask" suffix to the GNU builtin names and also updates the AVX512BIBase base class.
>
> I'd like to add the AVX512VL variants under a separate patch. Is that okay with you? ;)
>
> -Cam
>
> On Wed, Nov 19, 2014 at 2:24 AM, Demikhovsky, Elena <elena.demikhovsky at intel.com> wrote:
>> Hi Cam,
>>
>> The GCC built-in names are incorrect. Please look here
>> https://gcc.gnu.org/svn/gcc/branches/avx512-vlbwdq/gcc/config/i386/avx
>> 512fintrin.h
>>  for the reference.
>>
>> I also suggest putting two more multiclasses in the middle; we'll need to add a "W" form as well.
>>
>> +defm VPSRLDZ : avx512_shift_rrm<0xD2, "vpsrld", X86vsrl, v4i32, bc_v4i32, v16i32_info>,
>> +                           EVEX_V512, EVEX_CD8<32, CD8VQ>; defm
>> +VPSRLQZ : avx512_shift_rrm<0xD3, "vpsrlq", X86vsrl, v2i64, bc_v2i64, v8i64_info>,
>> +                           EVEX_V512, EVEX_CD8<64, CD8VQ>, VEX_W;
>>
>> I suggest writing something like this:
>>
>> multiclass avx512_varshift_sizes <>{
>> defm Z:           avx512_shift_rrm<opc..>, EVEX_V512
>> defm Z256:     avx512_shift_rrm<opc..>, EVEX_V256
>> defm Z128:     avx512_shift_rrm<opc.. ..>, EVEX_V128
>> }
>>
>> multiclass avx512_varshift_types < bits<8> opcd, bits<8> opcq, bits<8>
>> opcqw, string OpcodeStr, SDNode OpNode > { defm D:
>> avx512_varshift_sizes <opcd, OpcodeStr#"d", OpNode, v4i32, bc_v4i32,
>> .. >, EVEX_CD8<32, CD8VQ> defm Q: avx512_varshift_sizes <opcq, OpcodeStr#"q", OpNode, v2i64, bc_v2i64 ..>, EVEX_CD8<64, CD8VQ>, VEX_W defm W:
>> }
>> defm VPSRL : avx512_varshift_types <0xD2, 0xD3, 0xD1
>>
>>
>>
>>
>> -  Elena
>>
>>
>> -----Original Message-----
>> From: Cameron McInally [mailto:cameron.mcinally at nyu.edu]
>> Sent: Tuesday, November 18, 2014 22:34
>> To: llvm-commits at cs.uiuc.edu
>> Cc: Demikhovsky, Elena; Robert Khasanov
>> Subject: [PATCH][AVX512] Add 512b integer shift by variable patterns
>> and intrinsics
>>
>> Hey guys,
>>
>> Attached is a patch to support 512b integer shift by variable intrinsics for AVX512.
>>
>> Thanks,
>> Cam
>> ---------------------------------------------------------------------
>> Intel Israel (74) Limited
>>
>> This e-mail and any attachments may contain confidential material for
>> the sole use of the intended recipient(s). Any review or distribution
>> by others is strictly prohibited. If you are not the intended
>> recipient, please contact the sender and delete all copies.
> ---------------------------------------------------------------------
> Intel Israel (74) Limited
>
> This e-mail and any attachments may contain confidential material for
> the sole use of the intended recipient(s). Any review or distribution
> by others is strictly prohibited. If you are not the intended
> recipient, please contact the sender and delete all copies.

Hey Elena,

Sorry for the slow reply. An updated patch is attached...

-Cam
-------------- next part --------------
Index: X86ISelLowering.cpp
===================================================================
--- X86ISelLowering.cpp	(revision 222242)
+++ X86ISelLowering.cpp	(working copy)
@@ -16537,7 +16537,11 @@
                                               RoundingMode),
                                   Mask, Src0, Subtarget, DAG);
     }
-                                              
+    case INTR_TYPE_2OP_MASK: {
+      return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Op.getOperand(1),
+                                              Op.getOperand(2)),
+                                  Op.getOperand(4), Op.getOperand(3), Subtarget, DAG);
+    }                                             
     case CMP_MASK:
     case CMP_MASK_CC: {
       // Comparison intrinsics with masks.
@@ -16589,7 +16593,7 @@
     case VSHIFT_MASK:
       return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
                                                       Op.getOperand(1), Op.getOperand(2), DAG),
-                                  Op.getOperand(4), Op.getOperand(3), Subtarget, DAG);;
+                                  Op.getOperand(4), Op.getOperand(3), Subtarget, DAG);
     default:
       break;
     }
Index: X86InstrAVX512.td
===================================================================
--- X86InstrAVX512.td	(revision 222355)
+++ X86InstrAVX512.td	(working copy)
@@ -3121,6 +3121,7 @@
 def : Pat <(i8 (int_x86_avx512_mask_ptestm_q_512 (v8i64 VR512:$src1),
                  (v8i64 VR512:$src2), (i8 -1))),
                  (COPY_TO_REGCLASS (VPTESTMQZrr VR512:$src1, VR512:$src2), GR8)>;
+
 //===----------------------------------------------------------------------===//
 // AVX-512  Shift instructions
 //===----------------------------------------------------------------------===//
@@ -3139,74 +3140,58 @@
 }
 
 multiclass avx512_shift_rrm<bits<8> opc, string OpcodeStr, SDNode OpNode,
-                          RegisterClass RC, ValueType vt, ValueType SrcVT,
-                          PatFrag bc_frag, RegisterClass KRC> {
-  // src2 is always 128-bit
-  def rr : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
-       (ins RC:$src1, VR128X:$src2),
-           !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
-       [(set RC:$dst, (vt (OpNode RC:$src1, (SrcVT VR128X:$src2))))],
-        SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V;
-  def rrk : AVX512BI<opc, MRMSrcReg, (outs RC:$dst),
-       (ins KRC:$mask, RC:$src1, VR128X:$src2),
-           !strconcat(OpcodeStr,
-                " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
-       [], SSE_INTSHIFT_ITINS_P.rr>, EVEX_4V, EVEX_K;
-  def rm : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
-       (ins RC:$src1, i128mem:$src2),
-           !strconcat(OpcodeStr, " \t{$src2, $src1, $dst|$dst, $src1, $src2}"),
-       [(set RC:$dst, (vt (OpNode RC:$src1,
-                       (bc_frag (memopv2i64 addr:$src2)))))],
-                        SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V;
-  def rmk : AVX512BI<opc, MRMSrcMem, (outs RC:$dst),
-       (ins KRC:$mask, RC:$src1, i128mem:$src2),
-           !strconcat(OpcodeStr,
-                " \t{$src2, $src1, $dst {${mask}}|$dst {${mask}}, $src1, $src2}"),
-       [], SSE_INTSHIFT_ITINS_P.rm>, EVEX_4V, EVEX_K;
+                            ValueType SrcVT, PatFrag bc_frag, X86VectorVTInfo _> {
+   // src2 is always 128-bit
+  defm rr : AVX512_maskable<opc, MRMSrcReg, _, (outs _.RC:$dst),
+                   (ins _.RC:$src1, VR128X:$src2), OpcodeStr,
+                      "$src2, $src1", "$src1, $src2",
+                   (_.VT (OpNode _.RC:$src1, (SrcVT VR128X:$src2))),
+                   " ",  SSE_INTSHIFT_ITINS_P.rr>, AVX512BIBase, EVEX_4V;
+  defm rm : AVX512_maskable<opc, MRMSrcMem, _, (outs _.RC:$dst),
+                   (ins _.RC:$src1, i128mem:$src2), OpcodeStr,
+                       "$src2, $src1", "$src1, $src2",
+                   (_.VT (OpNode _.RC:$src1, (bc_frag (memopv2i64 addr:$src2)))),
+                   " ",  SSE_INTSHIFT_ITINS_P.rm>, AVX512BIBase, EVEX_4V;
 }
 
+multiclass avx512_varshift_sizes<bits<8> opc, string OpcodeStr, SDNode OpNode,
+                                  ValueType SrcVT, PatFrag bc_frag, X86VectorVTInfo _> {
+  defm Z : avx512_shift_rrm<opc, OpcodeStr, OpNode, SrcVT, bc_frag, _>;
+}
+
+multiclass avx512_varshift_types<bits<8> opcd, bits<8> opcq, string OpcodeStr, 
+                                 SDNode OpNode> {
+  defm D : avx512_varshift_sizes<opcd, OpcodeStr#"d", OpNode, v4i32, bc_v4i32, 
+                                 v16i32_info>, EVEX_V512, EVEX_CD8<32, CD8VQ>; 
+  defm Q : avx512_varshift_sizes<opcq, OpcodeStr#"q", OpNode, v2i64, bc_v2i64, 
+                                 v8i64_info>, EVEX_V512, EVEX_CD8<64, CD8VQ>, VEX_W;
+}
+
 defm VPSRLDZ : avx512_shift_rmi<0x72, MRM2r, MRM2m, "vpsrld", X86vsrli,
                            v16i32_info>,
                            EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPSRLDZ : avx512_shift_rrm<0xD2, "vpsrld", X86vsrl,
-                           VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
-                           EVEX_CD8<32, CD8VQ>;
-                           
 defm VPSRLQZ : avx512_shift_rmi<0x73, MRM2r, MRM2m, "vpsrlq", X86vsrli,
                            v8i64_info>, EVEX_V512,
                            EVEX_CD8<64, CD8VF>, VEX_W;
-defm VPSRLQZ : avx512_shift_rrm<0xD3, "vpsrlq", X86vsrl,
-                           VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
-                           EVEX_CD8<64, CD8VQ>, VEX_W;
 
 defm VPSLLDZ : avx512_shift_rmi<0x72, MRM6r, MRM6m, "vpslld", X86vshli,
                            v16i32_info>, EVEX_V512,
                            EVEX_CD8<32, CD8VF>;
-defm VPSLLDZ : avx512_shift_rrm<0xF2, "vpslld", X86vshl,
-                           VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
-                           EVEX_CD8<32, CD8VQ>;
-                           
 defm VPSLLQZ : avx512_shift_rmi<0x73, MRM6r, MRM6m, "vpsllq", X86vshli,
                            v8i64_info>, EVEX_V512,
                            EVEX_CD8<64, CD8VF>, VEX_W;
-defm VPSLLQZ : avx512_shift_rrm<0xF3, "vpsllq", X86vshl,
-                           VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
-                           EVEX_CD8<64, CD8VQ>, VEX_W;
 
 defm VPSRADZ : avx512_shift_rmi<0x72, MRM4r, MRM4m, "vpsrad", X86vsrai,
                            v16i32_info>,
                            EVEX_V512, EVEX_CD8<32, CD8VF>;
-defm VPSRADZ : avx512_shift_rrm<0xE2, "vpsrad", X86vsra,
-                           VR512, v16i32, v4i32, bc_v4i32, VK16WM>, EVEX_V512,
-                           EVEX_CD8<32, CD8VQ>;
-                           
 defm VPSRAQZ : avx512_shift_rmi<0x72, MRM4r, MRM4m, "vpsraq", X86vsrai,
                            v8i64_info>, EVEX_V512,
                            EVEX_CD8<64, CD8VF>, VEX_W;
-defm VPSRAQZ : avx512_shift_rrm<0xE2, "vpsraq", X86vsra,
-                           VR512, v8i64, v2i64, bc_v2i64, VK8WM>, EVEX_V512,
-                           EVEX_CD8<64, CD8VQ>, VEX_W;
 
+defm VPSRL : avx512_varshift_types<0xD2, 0xD3, "vpsrl", X86vsrl>;
+defm VPSLL : avx512_varshift_types<0xF2, 0xF3, "vpsll", X86vshl>;
+defm VPSRA : avx512_varshift_types<0xE2, 0xE2, "vpsra", X86vsra>;
+
 //===-------------------------------------------------------------------===//
 // Variable Bit Shifts
 //===-------------------------------------------------------------------===//
Index: X86IntrinsicsInfo.h
===================================================================
--- X86IntrinsicsInfo.h	(revision 222242)
+++ X86IntrinsicsInfo.h	(working copy)
@@ -21,7 +21,7 @@
   GATHER, SCATTER, PREFETCH, RDSEED, RDRAND, RDPMC, RDTSC, XTEST, ADX,
   INTR_TYPE_1OP, INTR_TYPE_2OP, INTR_TYPE_3OP,
   CMP_MASK, CMP_MASK_CC, VSHIFT, VSHIFT_MASK, COMI, 
-  INTR_TYPE_1OP_MASK_RM
+  INTR_TYPE_1OP_MASK_RM, INTR_TYPE_2OP_MASK
 };
 
 struct IntrinsicData {
@@ -195,10 +195,16 @@
   X86_INTRINSIC_DATA(avx512_mask_pcmpgt_w_128,  CMP_MASK,  X86ISD::PCMPGTM, 0),
   X86_INTRINSIC_DATA(avx512_mask_pcmpgt_w_256,  CMP_MASK,  X86ISD::PCMPGTM, 0),
   X86_INTRINSIC_DATA(avx512_mask_pcmpgt_w_512,  CMP_MASK,  X86ISD::PCMPGTM, 0),
+  X86_INTRINSIC_DATA(avx512_mask_psll_d,        INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
+  X86_INTRINSIC_DATA(avx512_mask_psll_q,        INTR_TYPE_2OP_MASK, X86ISD::VSHL, 0),
   X86_INTRINSIC_DATA(avx512_mask_pslli_d,       VSHIFT_MASK, X86ISD::VSHLI, 0),
   X86_INTRINSIC_DATA(avx512_mask_pslli_q,       VSHIFT_MASK, X86ISD::VSHLI, 0),
+  X86_INTRINSIC_DATA(avx512_mask_psra_d,        INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
+  X86_INTRINSIC_DATA(avx512_mask_psra_q,        INTR_TYPE_2OP_MASK, X86ISD::VSRA, 0),
   X86_INTRINSIC_DATA(avx512_mask_psrai_d,       VSHIFT_MASK, X86ISD::VSRAI, 0),
   X86_INTRINSIC_DATA(avx512_mask_psrai_q,       VSHIFT_MASK, X86ISD::VSRAI, 0),
+  X86_INTRINSIC_DATA(avx512_mask_psrl_d,        INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
+  X86_INTRINSIC_DATA(avx512_mask_psrl_q,        INTR_TYPE_2OP_MASK, X86ISD::VSRL, 0),
   X86_INTRINSIC_DATA(avx512_mask_psrli_d,       VSHIFT_MASK, X86ISD::VSRLI, 0),
   X86_INTRINSIC_DATA(avx512_mask_psrli_q,       VSHIFT_MASK, X86ISD::VSRLI, 0),
   X86_INTRINSIC_DATA(avx512_mask_ucmp_b_128,    CMP_MASK_CC,  X86ISD::CMPMU, 0),


More information about the llvm-commits mailing list