[llvm] r371427 - AMDGPU: Move MnemonicAlias out of instruction def hierarchy

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 9 10:25:35 PDT 2019


Author: arsenm
Date: Mon Sep  9 10:25:35 2019
New Revision: 371427

URL: http://llvm.org/viewvc/llvm-project?rev=371427&view=rev
Log:
AMDGPU: Move MnemonicAlias out of instruction def hierarchy

Unfortunately MnemonicAlias defines a "Predicates" field just like an
instruction or pattern, with a somewhat different interpretation.

This ends up overriding the intended Predicates set by
PredicateControl on the pseudoinstruction definitions with an empty
list. This allowed incorrectly selecting instructions that should have
been rejected due to the SubtargetPredicate from patterns on the
instruction definition.

This does remove the divergent predicate from the 64-bit shift
patterns, which were already not used for the 32-bit shift, so I'm not
sure what the point was. This also removes a second, redundant copy of
the 64-bit divergent patterns.

Modified:
    llvm/trunk/lib/Target/AMDGPU/VOP1Instructions.td
    llvm/trunk/lib/Target/AMDGPU/VOP2Instructions.td
    llvm/trunk/lib/Target/AMDGPU/VOP3Instructions.td
    llvm/trunk/lib/Target/AMDGPU/VOPInstructions.td
    llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir
    llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.mir
    llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.mir

Modified: llvm/trunk/lib/Target/AMDGPU/VOP1Instructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/VOP1Instructions.td?rev=371427&r1=371426&r2=371427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/VOP1Instructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/VOP1Instructions.td Mon Sep  9 10:25:35 2019
@@ -107,6 +107,13 @@ multiclass VOP1Inst <string opName, VOPP
   def _sdwa : VOP1_SDWA_Pseudo <opName, P>;
   foreach _ = BoolToList<P.HasExtDPP>.ret in
     def _dpp : VOP1_DPP_Pseudo <opName, P>;
+
+  def : MnemonicAlias<opName#"_e32", opName>, LetDummies;
+  def : MnemonicAlias<opName#"_e64", opName>, LetDummies;
+  def : MnemonicAlias<opName#"_sdwa", opName>, LetDummies;
+
+  foreach _ = BoolToList<P.HasExtDPP>.ret in
+    def : MnemonicAlias<opName#"_dpp", opName>, LetDummies;
 }
 
 // Special profile for instructions which have clamp

Modified: llvm/trunk/lib/Target/AMDGPU/VOP2Instructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/VOP2Instructions.td?rev=371427&r1=371426&r2=371427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/VOP2Instructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/VOP2Instructions.td Mon Sep  9 10:25:35 2019
@@ -539,9 +539,9 @@ defm V_MAX_LEGACY_F32 : VOP2Inst <"v_max
 let SubtargetPredicate = isGFX6GFX7GFX10 in {
 let isCommutable = 1 in {
 defm V_MAC_LEGACY_F32 : VOP2Inst <"v_mac_legacy_f32", VOP_F32_F32_F32>;
-defm V_LSHR_B32 : VOP2Inst <"v_lshr_b32", VOP_I32_I32_I32>;
-defm V_ASHR_I32 : VOP2Inst <"v_ashr_i32", VOP_I32_I32_I32>;
-defm V_LSHL_B32 : VOP2Inst <"v_lshl_b32", VOP_I32_I32_I32>;
+defm V_LSHR_B32 : VOP2Inst <"v_lshr_b32", VOP_I32_I32_I32, srl>;
+defm V_ASHR_I32 : VOP2Inst <"v_ashr_i32", VOP_I32_I32_I32, sra>;
+defm V_LSHL_B32 : VOP2Inst <"v_lshl_b32", VOP_I32_I32_I32, shl>;
 } // End isCommutable = 1
 } // End SubtargetPredicate = isGFX6GFX7GFX10
 

Modified: llvm/trunk/lib/Target/AMDGPU/VOP3Instructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/VOP3Instructions.td?rev=371427&r1=371426&r2=371427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/VOP3Instructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/VOP3Instructions.td Mon Sep  9 10:25:35 2019
@@ -385,12 +385,12 @@ def V_TRIG_PREOP_F64 : VOP3Inst <"v_trig
 }
 
 let SchedRW = [Write64Bit] in {
-let SubtargetPredicate = isGFX6GFX7GFX10, Predicates = [isGFX6GFX7GFX10] in {
-def V_LSHL_B64 : VOP3Inst <"v_lshl_b64", VOP3_Profile<VOP_PAT_GEN<VOP_I64_I64_I32>>, shl>;
-def V_LSHR_B64 : VOP3Inst <"v_lshr_b64", VOP3_Profile<VOP_PAT_GEN<VOP_I64_I64_I32>>, srl>;
-def V_ASHR_I64 : VOP3Inst <"v_ashr_i64", VOP3_Profile<VOP_PAT_GEN<VOP_I64_I64_I32>>, sra>;
+let SubtargetPredicate = isGFX6GFX7GFX10 in {
+def V_LSHL_B64 : VOP3Inst <"v_lshl_b64", VOP3_Profile<VOP_I64_I64_I32>, shl>;
+def V_LSHR_B64 : VOP3Inst <"v_lshr_b64", VOP3_Profile<VOP_I64_I64_I32>, srl>;
+def V_ASHR_I64 : VOP3Inst <"v_ashr_i64", VOP3_Profile<VOP_I64_I64_I32>, sra>;
 def V_MULLIT_F32 : VOP3Inst <"v_mullit_f32", VOP3_Profile<VOP_F32_F32_F32_F32>>;
-} // End SubtargetPredicate = isGFX6GFX7GFX10, Predicates = [isGFX6GFX7GFX10]
+} // End SubtargetPredicate = isGFX6GFX7GFX10
 
 let SubtargetPredicate = isGFX8Plus in {
 def V_LSHLREV_B64 : VOP3Inst <"v_lshlrev_b64", VOP3_Profile<VOP_I64_I32_I64>, lshl_rev>;
@@ -399,21 +399,6 @@ def V_ASHRREV_I64 : VOP3Inst <"v_ashrrev
 } // End SubtargetPredicate = isGFX8Plus
 } // End SchedRW = [Write64Bit]
 
-let Predicates = [isGFX8Plus] in {
-def : GCNPat <
- (getDivergentFrag<shl>.ret i64:$x, i32:$y),
- (V_LSHLREV_B64 $y, $x)
->;
-def : AMDGPUPat <
- (getDivergentFrag<srl>.ret i64:$x, i32:$y),
- (V_LSHRREV_B64 $y, $x)
->;
-def : AMDGPUPat <
- (getDivergentFrag<sra>.ret i64:$x, i32:$y),
- (V_ASHRREV_I64 $y, $x)
->;
-}
-
 
 let SchedRW = [Write32Bit] in {
 let SubtargetPredicate = isGFX8Plus in {

Modified: llvm/trunk/lib/Target/AMDGPU/VOPInstructions.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/VOPInstructions.td?rev=371427&r1=371426&r2=371427&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/VOPInstructions.td (original)
+++ llvm/trunk/lib/Target/AMDGPU/VOPInstructions.td Mon Sep  9 10:25:35 2019
@@ -14,6 +14,7 @@ class LetDummies {
   bit isReMaterializable;
   bit isAsCheapAsAMove;
   bit VOPAsmPrefer32Bit;
+  bit FPDPRounding;
   Predicate SubtargetPredicate;
   string Constraints;
   string DisableEncoding;
@@ -41,9 +42,7 @@ class VOP_Pseudo <string opName, string
                   string asm, list<dag> pattern> :
   InstSI <outs, ins, asm, pattern>,
   VOP <opName>,
-  SIMCInstr <opName#suffix, SIEncodingFamily.NONE>,
-  MnemonicAlias<opName#suffix, opName> {
-
+  SIMCInstr <opName#suffix, SIEncodingFamily.NONE> {
   let isPseudo = 1;
   let isCodeGenOnly = 1;
   let UseNamedOperandTable = 1;
@@ -473,8 +472,7 @@ class VOP_SDWA9Be<VOPProfile P> : VOP_SD
 class VOP_SDWA_Pseudo <string opName, VOPProfile P, list<dag> pattern=[]> :
   InstSI <P.OutsSDWA, P.InsSDWA, "", pattern>,
   VOP <opName>,
-  SIMCInstr <opName#"_sdwa", SIEncodingFamily.NONE>,
-  MnemonicAlias <opName#"_sdwa", opName> {
+  SIMCInstr <opName#"_sdwa", SIEncodingFamily.NONE> {
 
   let isPseudo = 1;
   let isCodeGenOnly = 1;
@@ -595,8 +593,7 @@ class VOP_DPPe<VOPProfile P, bit IsDPP16
 class VOP_DPP_Pseudo <string OpName, VOPProfile P, list<dag> pattern=[]> :
   InstSI <P.OutsDPP, P.InsDPP, OpName#P.AsmDPP, pattern>,
   VOP <OpName>,
-  SIMCInstr <OpName#"_dpp", SIEncodingFamily.NONE>,
-  MnemonicAlias <OpName#"_dpp", OpName> {
+  SIMCInstr <OpName#"_dpp", SIEncodingFamily.NONE> {
 
   let isPseudo = 1;
   let isCodeGenOnly = 1;

Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir?rev=371427&r1=371426&r2=371427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-ashr.mir Mon Sep  9 10:25:35 2019
@@ -216,13 +216,13 @@ body: |
     ; GFX6-LABEL: name: ashr_s64_sv
     ; GFX6: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX6: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
+    ; GFX6: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     ; GFX7-LABEL: name: ashr_s64_sv
     ; GFX7: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX7: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
+    ; GFX7: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     ; GFX8-LABEL: name: ashr_s64_sv
     ; GFX8: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
@@ -237,8 +237,8 @@ body: |
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX10: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
+    ; GFX10: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s64) = G_ASHR %0, %1
@@ -256,13 +256,13 @@ body: |
     ; GFX6-LABEL: name: ashr_s64_vs
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GFX6: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
+    ; GFX6: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     ; GFX7-LABEL: name: ashr_s64_vs
     ; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX7: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GFX7: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
+    ; GFX7: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     ; GFX8-LABEL: name: ashr_s64_vs
     ; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX8: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
@@ -277,8 +277,8 @@ body: |
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX10: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GFX10: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
+    ; GFX10: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:sgpr(s32) = COPY $sgpr0
     %2:vgpr(s64) = G_ASHR %0, %1
@@ -296,13 +296,13 @@ body: |
     ; GFX6-LABEL: name: ashr_s64_vv
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
+    ; GFX6: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     ; GFX7-LABEL: name: ashr_s64_vv
     ; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX7: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
+    ; GFX7: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     ; GFX8-LABEL: name: ashr_s64_vv
     ; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
@@ -317,11 +317,10 @@ body: |
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_ASHRREV_I64_:%[0-9]+]]:vreg_64 = V_ASHRREV_I64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_ASHRREV_I64_]]
+    ; GFX10: [[V_ASHR_I64_:%[0-9]+]]:vreg_64 = V_ASHR_I64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_ASHR_I64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s32) = COPY $vgpr2
     %2:vgpr(s64) = G_ASHR %0, %1
     S_ENDPGM 0, implicit %2
 ...
-

Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.mir?rev=371427&r1=371426&r2=371427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-lshr.mir Mon Sep  9 10:25:35 2019
@@ -216,13 +216,13 @@ body: |
     ; GFX6-LABEL: name: lshr_s64_sv
     ; GFX6: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX6: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
+    ; GFX6: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
     ; GFX7-LABEL: name: lshr_s64_sv
     ; GFX7: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX7: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
+    ; GFX7: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
     ; GFX8-LABEL: name: lshr_s64_sv
     ; GFX8: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
@@ -237,8 +237,8 @@ body: |
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX10: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
+    ; GFX10: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s64) = G_LSHR %0, %1
@@ -256,13 +256,13 @@ body: |
     ; GFX6-LABEL: name: lshr_s64_vs
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GFX6: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
+    ; GFX6: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
     ; GFX7-LABEL: name: lshr_s64_vs
     ; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX7: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GFX7: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
+    ; GFX7: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
     ; GFX8-LABEL: name: lshr_s64_vs
     ; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX8: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
@@ -277,8 +277,8 @@ body: |
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX10: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GFX10: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
+    ; GFX10: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:sgpr(s32) = COPY $sgpr0
     %2:vgpr(s64) = G_LSHR %0, %1
@@ -296,13 +296,13 @@ body: |
     ; GFX6-LABEL: name: lshr_s64_vv
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
+    ; GFX6: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
     ; GFX7-LABEL: name: lshr_s64_vv
     ; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX7: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
+    ; GFX7: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
     ; GFX8-LABEL: name: lshr_s64_vv
     ; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
@@ -317,8 +317,8 @@ body: |
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_LSHRREV_B64_:%[0-9]+]]:vreg_64 = V_LSHRREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_LSHRREV_B64_]]
+    ; GFX10: [[V_LSHR_B64_:%[0-9]+]]:vreg_64 = V_LSHR_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_LSHR_B64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s32) = COPY $vgpr2
     %2:vgpr(s64) = G_LSHR %0, %1

Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.mir?rev=371427&r1=371426&r2=371427&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/inst-select-shl.mir Mon Sep  9 10:25:35 2019
@@ -216,13 +216,13 @@ body: |
     ; GFX6-LABEL: name: shl_s64_sv
     ; GFX6: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX6: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
+    ; GFX6: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
     ; GFX7-LABEL: name: shl_s64_sv
     ; GFX7: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX7: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
+    ; GFX7: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
     ; GFX8-LABEL: name: shl_s64_sv
     ; GFX8: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
@@ -237,8 +237,8 @@ body: |
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:sreg_64_xexec = COPY $sgpr0_sgpr1
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
-    ; GFX10: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
+    ; GFX10: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
     %0:sgpr(s64) = COPY $sgpr0_sgpr1
     %1:vgpr(s32) = COPY $vgpr0
     %2:vgpr(s64) = G_SHL %0, %1
@@ -256,13 +256,13 @@ body: |
     ; GFX6-LABEL: name: shl_s64_vs
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GFX6: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
+    ; GFX6: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
     ; GFX7-LABEL: name: shl_s64_vs
     ; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX7: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GFX7: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
+    ; GFX7: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
     ; GFX8-LABEL: name: shl_s64_vs
     ; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX8: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
@@ -277,8 +277,8 @@ body: |
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX10: [[COPY1:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
-    ; GFX10: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
+    ; GFX10: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:sgpr(s32) = COPY $sgpr0
     %2:vgpr(s64) = G_SHL %0, %1
@@ -296,13 +296,13 @@ body: |
     ; GFX6-LABEL: name: shl_s64_vv
     ; GFX6: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX6: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX6: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX6: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
+    ; GFX6: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX6: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
     ; GFX7-LABEL: name: shl_s64_vv
     ; GFX7: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX7: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX7: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX7: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
+    ; GFX7: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX7: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
     ; GFX8-LABEL: name: shl_s64_vv
     ; GFX8: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX8: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
@@ -317,8 +317,8 @@ body: |
     ; GFX10: $vcc_hi = IMPLICIT_DEF
     ; GFX10: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
     ; GFX10: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr2
-    ; GFX10: [[V_LSHLREV_B64_:%[0-9]+]]:vreg_64 = V_LSHLREV_B64 [[COPY1]], [[COPY]], implicit $exec
-    ; GFX10: S_ENDPGM 0, implicit [[V_LSHLREV_B64_]]
+    ; GFX10: [[V_LSHL_B64_:%[0-9]+]]:vreg_64 = V_LSHL_B64 [[COPY]], [[COPY1]], implicit $exec
+    ; GFX10: S_ENDPGM 0, implicit [[V_LSHL_B64_]]
     %0:vgpr(s64) = COPY $vgpr0_vgpr1
     %1:vgpr(s32) = COPY $vgpr2
     %2:vgpr(s64) = G_SHL %0, %1




More information about the llvm-commits mailing list