[llvm] r374887 - [ARM][MVE] validForTailPredication insts
Sam Parker via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 15 06:12:51 PDT 2019
Author: sam_parker
Date: Tue Oct 15 06:12:51 2019
New Revision: 374887
URL: http://llvm.org/viewvc/llvm-project?rev=374887&view=rev
Log:
[ARM][MVE] validForTailPredication insts
Reverse the logic for valid tail predication instructions and create
a whitelist instead. Also add other instruction groups that aren't
obviously safe:
- instructions that 'narrow' their result.
- lane moves.
- byte swapping instructions.
- interleaving loads and stores.
- cross-beat carries.
- top/bottom instructions.
- complex operations.
Hopefully we should be able to add more of these instructions to the
whitelist, once we have a more concrete idea of the transform.
Differential Revision: https://reviews.llvm.org/D67904
Modified:
llvm/trunk/lib/Target/ARM/ARMInstrFormats.td
llvm/trunk/lib/Target/ARM/ARMInstrMVE.td
llvm/trunk/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
llvm/trunk/unittests/Target/ARM/CMakeLists.txt
llvm/trunk/unittests/Target/ARM/MachineInstrTest.cpp
Modified: llvm/trunk/lib/Target/ARM/ARMInstrFormats.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMInstrFormats.td?rev=374887&r1=374886&r2=374887&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrFormats.td (original)
+++ llvm/trunk/lib/Target/ARM/ARMInstrFormats.td Tue Oct 15 06:12:51 2019
@@ -408,7 +408,7 @@ class InstTemplate<AddrMode am, int sz,
// mnemonic (when not in an IT block) or preclude it (when in an IT block).
bit thumbArithFlagSetting = 0;
- bit invalidForTailPredication = 0;
+ bit validForTailPredication = 0;
// If this is a pseudo instruction, mark it isCodeGenOnly.
let isCodeGenOnly = !eq(!cast<string>(f), "Pseudo");
@@ -421,7 +421,7 @@ class InstTemplate<AddrMode am, int sz,
let TSFlags{14} = canXformTo16Bit;
let TSFlags{18-15} = D.Value;
let TSFlags{19} = thumbArithFlagSetting;
- let TSFlags{20} = invalidForTailPredication;
+ let TSFlags{20} = validForTailPredication;
let Constraints = cstr;
let Itinerary = itin;
Modified: llvm/trunk/lib/Target/ARM/ARMInstrMVE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMInstrMVE.td?rev=374887&r1=374886&r2=374887&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrMVE.td (original)
+++ llvm/trunk/lib/Target/ARM/ARMInstrMVE.td Tue Oct 15 06:12:51 2019
@@ -507,7 +507,6 @@ class MVE_VABAV<string suffix, bit U, bi
let Inst{5} = Qm{3};
let Inst{3-1} = Qm{2-0};
let Inst{0} = 0b1;
- let invalidForTailPredication = 1;
}
def MVE_VABAVs8 : MVE_VABAV<"s8", 0b0, 0b00>;
@@ -534,7 +533,6 @@ class MVE_VADDV<string iname, string suf
let Inst{5} = A;
let Inst{3-1} = Qm{2-0};
let Inst{0} = 0b0;
- let invalidForTailPredication = 1;
}
multiclass MVE_VADDV_A<string suffix, bit U, bits<2> size,
@@ -585,7 +583,6 @@ class MVE_VADDLV<string iname, string su
let Inst{5} = A;
let Inst{3-1} = Qm{2-0};
let Inst{0} = 0b0;
- let invalidForTailPredication = 1;
}
multiclass MVE_VADDLV_A<string suffix, bit U, list<dag> pattern=[]> {
@@ -623,7 +620,6 @@ class MVE_VMINMAXNMV<string iname, strin
let Inst{0} = 0b0;
let Predicates = [HasMVEFloat];
- let invalidForTailPredication = 1;
}
multiclass MVE_VMINMAXNMV_fty<string iname, bit bit_7, list<dag> pattern=[]> {
@@ -660,7 +656,6 @@ class MVE_VMINMAXV<string iname, string
let Inst{6-5} = 0b00;
let Inst{3-1} = Qm{2-0};
let Inst{0} = 0b0;
- let invalidForTailPredication = 1;
}
multiclass MVE_VMINMAXV_ty<string iname, bit bit_7, list<dag> pattern=[]> {
@@ -733,7 +728,6 @@ class MVE_VMLAMLSDAV<string iname, strin
let Inst{5} = A;
let Inst{3-1} = Qm{2-0};
let Inst{0} = bit_0;
- let invalidForTailPredication = 1;
}
multiclass MVE_VMLAMLSDAV_A<string iname, string x, string suffix,
@@ -809,7 +803,6 @@ class MVE_VMLALDAVBase<string iname, str
let Inst{5} = A;
let Inst{3-1} = Qm{2-0};
let Inst{0} = bit_0;
- let invalidForTailPredication = 1;
}
multiclass MVE_VMLALDAVBase_A<string iname, string x, string suffix,
@@ -1038,6 +1031,7 @@ def MVE_VBIC : MVE_bit_arith<(outs MQPR:
let Inst{6} = 0b1;
let Inst{4} = 0b1;
let Inst{0} = 0b0;
+ let validForTailPredication = 1;
}
class MVE_VREV<string iname, string suffix, bits<2> size, bits<2> bit_8_7, string cstr="">
@@ -1104,6 +1098,7 @@ def MVE_VMVN : MVE_bit_arith<(outs MQPR:
let Inst{12-6} = 0b0010111;
let Inst{4} = 0b0;
let Inst{0} = 0b0;
+ let validForTailPredication = 1;
}
let Predicates = [HasMVEInt] in {
@@ -1132,6 +1127,7 @@ class MVE_bit_ops<string iname, bits<2>
let Inst{6} = 0b1;
let Inst{4} = 0b1;
let Inst{0} = 0b0;
+ let validForTailPredication = 1;
}
def MVE_VEOR : MVE_bit_ops<"veor", 0b00, 0b1>;
@@ -1223,6 +1219,7 @@ class MVE_bit_cmode<string iname, string
class MVE_VORR<string suffix, bits<4> cmode, ExpandImm imm_type>
: MVE_bit_cmode<"vorr", suffix, cmode, (ins MQPR:$Qd_src, imm_type:$imm)> {
let Inst{5} = 0b0;
+ let validForTailPredication = 1;
}
def MVE_VORRIZ0v4i32 : MVE_VORR<"i32", 0b0001, expzero00>;
@@ -1251,6 +1248,7 @@ def MVE_VMOV : MVEInstAlias<"vmov${vp}\t
class MVE_VBIC<string suffix, bits<4> cmode, ExpandImm imm_type>
: MVE_bit_cmode<"vbic", suffix, cmode, (ins MQPR:$Qd_src, imm_type:$imm)> {
let Inst{5} = 0b1;
+ let validForTailPredication = 1;
}
def MVE_VBICIZ0v4i32 : MVE_VBIC<"i32", 0b0001, expzero00>;
@@ -1490,6 +1488,7 @@ class MVE_VADDSUB<string iname, string s
let Inst{12-8} = 0b01000;
let Inst{4} = 0b0;
let Inst{0} = 0b0;
+ let validForTailPredication = 1;
}
class MVE_VADD<string suffix, bits<2> size, list<dag> pattern=[]>
@@ -1535,6 +1534,7 @@ class MVE_VQADDSUB<string iname, string
let Inst{8} = 0b0;
let Inst{4} = 0b1;
let Inst{0} = 0b0;
+ let validForTailPredication = 1;
ValueType VT = vt;
}
@@ -1587,6 +1587,7 @@ class MVE_VABD_int<string suffix, bit U,
let Inst{12-8} = 0b00111;
let Inst{4} = 0b0;
let Inst{0} = 0b0;
+ let validForTailPredication = 1;
}
def MVE_VABDs8 : MVE_VABD_int<"s8", 0b0, 0b00>;
@@ -1605,6 +1606,7 @@ class MVE_VRHADD<string suffix, bit U, b
let Inst{12-8} = 0b00001;
let Inst{4} = 0b0;
let Inst{0} = 0b0;
+ let validForTailPredication = 1;
}
def MVE_VRHADDs8 : MVE_VRHADD<"s8", 0b0, 0b00>;
@@ -1626,6 +1628,7 @@ class MVE_VHADDSUB<string iname, string
let Inst{8} = 0b0;
let Inst{4} = 0b0;
let Inst{0} = 0b0;
+ let validForTailPredication = 1;
}
class MVE_VHADD<string suffix, bit U, bits<2> size,
@@ -1721,6 +1724,7 @@ class MVE_VDUP<string suffix, bit B, bit
let Inst{6} = 0b0;
let Inst{5} = E;
let Inst{4-0} = 0b10000;
+ let validForTailPredication = 1;
}
def MVE_VDUP32 : MVE_VDUP<"32", 0b0, 0b0>;
@@ -1783,6 +1787,7 @@ class MVE_VCLSCLZ<string iname, string s
let Inst{6} = 0b1;
let Inst{4} = 0b0;
let Inst{0} = 0b0;
+ let validForTailPredication = 1;
}
def MVE_VCLSs8 : MVE_VCLSCLZ<"vcls", "s8", 0b00, 0b0>;
@@ -1815,6 +1820,7 @@ class MVE_VABSNEG_int<string iname, stri
let Inst{6} = 0b1;
let Inst{4} = 0b0;
let Inst{0} = 0b0;
+ let validForTailPredication = 1;
}
def MVE_VABSs8 : MVE_VABSNEG_int<"vabs", "s8", 0b00, 0b0>;
@@ -1856,6 +1862,7 @@ class MVE_VQABSNEG<string iname, string
let Inst{6} = 0b1;
let Inst{4} = 0b0;
let Inst{0} = 0b0;
+ let validForTailPredication = 1;
}
def MVE_VQABSs8 : MVE_VQABSNEG<"vqabs", "s8", 0b00, 0b0>;
@@ -1887,6 +1894,7 @@ class MVE_mod_imm<string iname, string s
let Inst{3-0} = imm{3-0};
let DecoderMethod = "DecodeMVEModImmInstruction";
+ let validForTailPredication = 1;
}
let isReMaterializable = 1 in {
@@ -2282,6 +2290,7 @@ class MVE_shift_by_vec<string iname, str
let Inst{4} = bit_4;
let Inst{3-1} = Qm{2-0};
let Inst{0} = 0b0;
+ let validForTailPredication = 1;
}
multiclass mve_shift_by_vec_multi<string iname, bit bit_4, bit bit_8> {
@@ -2330,6 +2339,7 @@ class MVE_shift_with_imm<string iname, s
let Inst{4} = 0b1;
let Inst{3-1} = Qm{2-0};
let Inst{0} = 0b0;
+ let validForTailPredication = 1;
}
class MVE_VSxI_imm<string iname, string suffix, bit bit_8, dag imm>
@@ -2342,6 +2352,7 @@ class MVE_VSxI_imm<string iname, string
let Inst{21-16} = imm;
let Inst{10-9} = 0b10;
let Inst{8} = bit_8;
+ let validForTailPredication = 1;
}
def MVE_VSRIimm8 : MVE_VSxI_imm<"vsri", "8", 0b0, (ins shr_imm8:$imm)> {
@@ -2594,6 +2605,7 @@ class MVE_VRINT<string rmode, bits<3> op
let Inst{11-10} = 0b01;
let Inst{9-7} = op{2-0};
let Inst{4} = 0b0;
+ let validForTailPredication = 1;
}
@@ -2656,6 +2668,7 @@ class MVE_VMUL_fp<string suffix, bit siz
let Inst{12-8} = 0b01101;
let Inst{7} = Qn{3};
let Inst{4} = 0b1;
+ let validForTailPredication = 1;
}
def MVE_VMULf32 : MVE_VMUL_fp<"f32", 0b0>;
@@ -2751,8 +2764,10 @@ let Predicates = [HasMVEFloat] in {
}
-def MVE_VADDf32 : MVE_VADDSUBFMA_fp<"vadd", "f32", 0b0, 0b0, 0b1, 0b0>;
-def MVE_VADDf16 : MVE_VADDSUBFMA_fp<"vadd", "f16", 0b1, 0b0, 0b1, 0b0>;
+let validForTailPredication = 1 in {
+ def MVE_VADDf32 : MVE_VADDSUBFMA_fp<"vadd", "f32", 0b0, 0b0, 0b1, 0b0>;
+ def MVE_VADDf16 : MVE_VADDSUBFMA_fp<"vadd", "f16", 0b1, 0b0, 0b1, 0b0>;
+}
let Predicates = [HasMVEFloat] in {
def : Pat<(v4f32 (fadd (v4f32 MQPR:$val1), (v4f32 MQPR:$val2))),
@@ -2761,8 +2776,11 @@ let Predicates = [HasMVEFloat] in {
(v8f16 (MVE_VADDf16 (v8f16 MQPR:$val1), (v8f16 MQPR:$val2)))>;
}
-def MVE_VSUBf32 : MVE_VADDSUBFMA_fp<"vsub", "f32", 0b0, 0b0, 0b1, 0b1>;
-def MVE_VSUBf16 : MVE_VADDSUBFMA_fp<"vsub", "f16", 0b1, 0b0, 0b1, 0b1>;
+
+let validForTailPredication = 1 in {
+ def MVE_VSUBf32 : MVE_VADDSUBFMA_fp<"vsub", "f32", 0b0, 0b0, 0b1, 0b1>;
+ def MVE_VSUBf16 : MVE_VADDSUBFMA_fp<"vsub", "f16", 0b1, 0b0, 0b1, 0b1>;
+}
let Predicates = [HasMVEFloat] in {
def : Pat<(v4f32 (fsub (v4f32 MQPR:$val1), (v4f32 MQPR:$val2))),
@@ -2812,6 +2830,7 @@ class MVE_VABD_fp<string suffix, bit siz
let Inst{11-8} = 0b1101;
let Inst{7} = Qn{3};
let Inst{4} = 0b0;
+ let validForTailPredication = 1;
}
def MVE_VABDf32 : MVE_VABD_fp<"f32", 0b0>;
@@ -2838,6 +2857,7 @@ class MVE_VCVT_fix<string suffix, bit fs
let Inst{4} = 0b1;
let DecoderMethod = "DecodeMVEVCVTt1fp";
+ let validForTailPredication = 1;
}
class MVE_VCVT_imm_asmop<int Bits> : AsmOperandClass {
@@ -2888,6 +2908,7 @@ class MVE_VCVT_fp_int_anpm<string suffix
let Inst{9-8} = rm;
let Inst{7} = op;
let Inst{4} = 0b0;
+ let validForTailPredication = 1;
}
multiclass MVE_VCVT_fp_int_anpm_multi<string suffix, bits<2> size, bit op,
@@ -2922,6 +2943,7 @@ class MVE_VCVT_fp_int<string suffix, bit
let Inst{12-9} = 0b0011;
let Inst{8-7} = op;
let Inst{4} = 0b0;
+ let validForTailPredication = 1;
}
// The unsuffixed VCVT for float->int implicitly rounds toward zero,
@@ -2971,6 +2993,7 @@ class MVE_VABSNEG_fp<string iname, strin
let Inst{11-8} = 0b0111;
let Inst{7} = negate;
let Inst{4} = 0b0;
+ let validForTailPredication = 1;
}
def MVE_VABSf16 : MVE_VABSNEG_fp<"vabs", "f16", 0b01, 0b0>;
@@ -3058,6 +3081,7 @@ class MVE_VCMPqq<string suffix, bit bit_
// decoder to emit an operand that isn't affected by any instruction
// bit.
let DecoderMethod = "DecodeMVEVCMP<false," # predtype.DecoderMethod # ">";
+ let validForTailPredication = 1;
}
class MVE_VCMPqqf<string suffix, bit size>
@@ -3122,6 +3146,7 @@ class MVE_VCMPqr<string suffix, bit bit_
let Constraints = "";
// Custom decoder method, for the same reason as MVE_VCMPqq
let DecoderMethod = "DecodeMVEVCMP<true," # predtype.DecoderMethod # ">";
+ let validForTailPredication = 1;
}
class MVE_VCMPqrf<string suffix, bit size>
@@ -3667,6 +3692,7 @@ class MVE_VADDSUB_qr<string iname, strin
let Inst{12} = bit_12;
let Inst{8} = 0b1;
let Inst{5} = bit_5;
+ let validForTailPredication = 1;
}
multiclass MVE_VADDSUB_qr_sizes<string iname, string suffix,
@@ -3737,6 +3763,7 @@ class MVE_VxADDSUB_qr<string iname, stri
let Inst{12} = subtract;
let Inst{8} = 0b1;
let Inst{5} = 0b0;
+ let validForTailPredication = 1;
}
def MVE_VHADD_qr_s8 : MVE_VxADDSUB_qr<"vhadd", "s8", 0b0, 0b00, 0b0>;
@@ -3774,6 +3801,7 @@ class MVE_VxSHL_qr<string iname, string
let Inst{12-8} = 0b11110;
let Inst{7} = bit_7;
let Inst{6-4} = 0b110;
+ let validForTailPredication = 1;
}
multiclass MVE_VxSHL_qr_types<string iname, bit bit_7, bit bit_17> {
@@ -3815,6 +3843,7 @@ class MVE_VBRSR<string iname, string suf
let Inst{12} = 0b1;
let Inst{8} = 0b0;
let Inst{5} = 0b1;
+ let validForTailPredication = 1;
}
def MVE_VBRSR8 : MVE_VBRSR<"vbrsr", "8", 0b00>;
@@ -3842,6 +3871,7 @@ class MVE_VMUL_qr_int<string iname, stri
let Inst{12} = 0b1;
let Inst{8} = 0b0;
let Inst{5} = 0b1;
+ let validForTailPredication = 1;
}
def MVE_VMUL_qr_i8 : MVE_VMUL_qr_int<"vmul", "i8", 0b00>;
@@ -3877,7 +3907,7 @@ def MVE_VQRDMULH_qr_s8 : MVE_VxxMUL_qr<
def MVE_VQRDMULH_qr_s16 : MVE_VxxMUL_qr<"vqrdmulh", "s16", 0b1, 0b01>;
def MVE_VQRDMULH_qr_s32 : MVE_VxxMUL_qr<"vqrdmulh", "s32", 0b1, 0b10>;
-let Predicates = [HasMVEFloat] in {
+let Predicates = [HasMVEFloat], validForTailPredication = 1 in {
def MVE_VMUL_qr_f16 : MVE_VxxMUL_qr<"vmul", "f16", 0b1, 0b11>;
def MVE_VMUL_qr_f32 : MVE_VxxMUL_qr<"vmul", "f32", 0b0, 0b11>;
}
@@ -3893,6 +3923,7 @@ class MVE_VFMAMLA_qr<string iname, strin
let Inst{12} = S;
let Inst{8} = 0b0;
let Inst{5} = 0b0;
+ let validForTailPredication = 1;
}
def MVE_VMLA_qr_s8 : MVE_VFMAMLA_qr<"vmla", "s8", 0b0, 0b00, 0b0>;
@@ -3976,6 +4007,7 @@ class MVE_VxDUP<string iname, string suf
let Inst{7} = imm{1};
let Inst{6-1} = 0b110111;
let Inst{0} = imm{0};
+ let validForTailPredication = 1;
}
def MVE_VIDUPu8 : MVE_VxDUP<"vidup", "u8", 0b00, 0b0>;
@@ -4010,6 +4042,7 @@ class MVE_VxWDUP<string iname, string su
let Inst{6-4} = 0b110;
let Inst{3-1} = Rm{3-1};
let Inst{0} = imm{0};
+ let validForTailPredication = 1;
}
def MVE_VIWDUPu8 : MVE_VxWDUP<"viwdup", "u8", 0b00, 0b0>;
@@ -4036,6 +4069,7 @@ class MVE_VCTP<string suffix, bits<2> si
let Constraints = "";
let DecoderMethod = "DecodeMveVCTP";
+ let validForTailPredication = 1;
}
def MVE_VCTP8 : MVE_VCTP<"8", 0b00>;
@@ -4294,6 +4328,7 @@ class MVE_VLDRSTR_base<MVE_ldst_directio
let mayLoad = dir.load;
let mayStore = !eq(dir.load,0);
+ let validForTailPredication = 1;
}
// Contiguous load and store instructions. These come in two main
@@ -4597,6 +4632,7 @@ class MVE_VPT<string suffix, bits<2> siz
let Inst{4} = 0b0;
let Defs = [VPR];
+ let validForTailPredication = 1;
}
class MVE_VPTt1<string suffix, bits<2> size, dag iops>
@@ -4608,6 +4644,7 @@ class MVE_VPTt1<string suffix, bits<2> s
let Inst{5} = Qm{3};
let Inst{3-1} = Qm{2-0};
let Inst{0} = fc{1};
+ let validForTailPredication = 1;
}
class MVE_VPTt1i<string suffix, bits<2> size>
@@ -4709,6 +4746,7 @@ class MVE_VPTf<string suffix, bit size,
let Defs = [VPR];
let Predicates = [HasMVEFloat];
+ let validForTailPredication = 1;
}
class MVE_VPTft1<string suffix, bit size>
@@ -4754,6 +4792,7 @@ def MVE_VPST : MVE_MI<(outs ), (ins vpt_
let Unpredictable{5} = 0b1;
let Defs = [VPR];
+ let validForTailPredication = 1;
}
def MVE_VPSEL : MVE_p<(outs MQPR:$Qd), (ins MQPR:$Qn, MQPR:$Qm), NoItinerary,
@@ -4777,6 +4816,7 @@ def MVE_VPSEL : MVE_p<(outs MQPR:$Qd), (
let Inst{4} = 0b0;
let Inst{3-1} = Qm{2-0};
let Inst{0} = 0b1;
+ let validForTailPredication = 1;
}
foreach suffix = ["s8", "s16", "s32", "u8", "u16", "u32",
Modified: llvm/trunk/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h?rev=374887&r1=374886&r2=374887&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h (original)
+++ llvm/trunk/lib/Target/ARM/MCTargetDesc/ARMBaseInfo.h Tue Oct 15 06:12:51 2019
@@ -393,9 +393,8 @@ namespace ARMII {
// in an IT block).
ThumbArithFlagSetting = 1 << 19,
- // Whether an instruction should be excluded from an MVE tail-predicated
- // loop.
- InvalidForTailPredication = 1 << 20,
+ // Whether an instruction can be included in an MVE tail-predicated loop.
+ ValidForTailPredication = 1 << 20,
//===------------------------------------------------------------------===//
// Code domain.
Modified: llvm/trunk/unittests/Target/ARM/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/unittests/Target/ARM/CMakeLists.txt?rev=374887&r1=374886&r2=374887&view=diff
==============================================================================
--- llvm/trunk/unittests/Target/ARM/CMakeLists.txt (original)
+++ llvm/trunk/unittests/Target/ARM/CMakeLists.txt Tue Oct 15 06:12:51 2019
@@ -1,13 +1,15 @@
include_directories(
- ${CMAKE_SOURCE_DIR}/lib/Target/ARM
- ${CMAKE_BINARY_DIR}/lib/Target/ARM
+ ${LLVM_MAIN_SRC_DIR}/lib/Target/ARM
+ ${LLVM_BINARY_DIR}/lib/Target/ARM
)
set(LLVM_LINK_COMPONENTS
ARMCodeGen
ARMDesc
ARMInfo
+ CodeGen
MC
+ SelectionDAG
Support
Target
)
Modified: llvm/trunk/unittests/Target/ARM/MachineInstrTest.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/unittests/Target/ARM/MachineInstrTest.cpp?rev=374887&r1=374886&r2=374887&view=diff
==============================================================================
--- llvm/trunk/unittests/Target/ARM/MachineInstrTest.cpp (original)
+++ llvm/trunk/unittests/Target/ARM/MachineInstrTest.cpp Tue Oct 15 06:12:51 2019
@@ -12,13 +12,458 @@ using namespace llvm;
// Test for instructions that aren't immediately obviously valid within a
// tail-predicated loop. This should be marked up in their tablegen
-// descriptions. Currently the horizontal vector operations are tagged.
-// TODO Add instructions that perform:
-// - truncation,
-// - extensions,
-// - byte swapping,
-// - others?
-TEST(MachineInstrInvalidTailPredication, IsCorrect) {
+// descriptions. Currently we, conservatively, disallow:
+// - cross beat carries.
+// - narrowing of results.
+// - top/bottom operations.
+// - complex operations.
+// - horizontal operations.
+// - byte swapping.
+// - interleaved memory instructions.
+// TODO: Add to this list once we can handle them safely.
+TEST(MachineInstrValidTailPredication, IsCorrect) {
+
+ using namespace ARM;
+
+ auto IsValidTPOpcode = [](unsigned Opcode) {
+ switch (Opcode) {
+ default:
+ return false;
+ case MVE_ASRLi:
+ case MVE_ASRLr:
+ case MVE_LSRL:
+ case MVE_SQRSHR:
+ case MVE_SQSHL:
+ case MVE_SRSHR:
+ case MVE_UQRSHL:
+ case MVE_UQSHL:
+ case MVE_URSHR:
+ case MVE_VABDf16:
+ case MVE_VABDf32:
+ case MVE_VABDs16:
+ case MVE_VABDs32:
+ case MVE_VABDs8:
+ case MVE_VABDu16:
+ case MVE_VABDu32:
+ case MVE_VABDu8:
+ case MVE_VABSf16:
+ case MVE_VABSf32:
+ case MVE_VABSs16:
+ case MVE_VABSs32:
+ case MVE_VABSs8:
+ case MVE_VADD_qr_f16:
+ case MVE_VADD_qr_f32:
+ case MVE_VADD_qr_i16:
+ case MVE_VADD_qr_i32:
+ case MVE_VADD_qr_i8:
+ case MVE_VADDf16:
+ case MVE_VADDf32:
+ case MVE_VADDi16:
+ case MVE_VADDi32:
+ case MVE_VADDi8:
+ case MVE_VAND:
+ case MVE_VBIC:
+ case MVE_VBICIZ0v4i32:
+ case MVE_VBICIZ0v8i16:
+ case MVE_VBICIZ16v4i32:
+ case MVE_VBICIZ24v4i32:
+ case MVE_VBICIZ8v4i32:
+ case MVE_VBICIZ8v8i16:
+ case MVE_VBRSR16:
+ case MVE_VBRSR32:
+ case MVE_VBRSR8:
+ case MVE_VCLSs16:
+ case MVE_VCLSs32:
+ case MVE_VCLSs8:
+ case MVE_VCLZs16:
+ case MVE_VCLZs32:
+ case MVE_VCLZs8:
+ case MVE_VCMPf16:
+ case MVE_VCMPf16r:
+ case MVE_VCMPf32:
+ case MVE_VCMPf32r:
+ case MVE_VCMPi16:
+ case MVE_VCMPi16r:
+ case MVE_VCMPi32:
+ case MVE_VCMPi32r:
+ case MVE_VCMPi8:
+ case MVE_VCMPi8r:
+ case MVE_VCMPs16:
+ case MVE_VCMPs16r:
+ case MVE_VCMPs32:
+ case MVE_VCMPs32r:
+ case MVE_VCMPs8:
+ case MVE_VCMPs8r:
+ case MVE_VCMPu16:
+ case MVE_VCMPu16r:
+ case MVE_VCMPu32:
+ case MVE_VCMPu32r:
+ case MVE_VCMPu8:
+ case MVE_VCMPu8r:
+ case MVE_VCTP16:
+ case MVE_VCTP32:
+ case MVE_VCTP64:
+ case MVE_VCTP8:
+ case MVE_VCVTf16s16_fix:
+ case MVE_VCVTf16s16n:
+ case MVE_VCVTf16u16_fix:
+ case MVE_VCVTf16u16n:
+ case MVE_VCVTf32s32_fix:
+ case MVE_VCVTf32s32n:
+ case MVE_VCVTf32u32_fix:
+ case MVE_VCVTf32u32n:
+ case MVE_VCVTs16f16_fix:
+ case MVE_VCVTs16f16a:
+ case MVE_VCVTs16f16m:
+ case MVE_VCVTs16f16n:
+ case MVE_VCVTs16f16p:
+ case MVE_VCVTs16f16z:
+ case MVE_VCVTs32f32_fix:
+ case MVE_VCVTs32f32a:
+ case MVE_VCVTs32f32m:
+ case MVE_VCVTs32f32n:
+ case MVE_VCVTs32f32p:
+ case MVE_VCVTs32f32z:
+ case MVE_VCVTu16f16_fix:
+ case MVE_VCVTu16f16a:
+ case MVE_VCVTu16f16m:
+ case MVE_VCVTu16f16n:
+ case MVE_VCVTu16f16p:
+ case MVE_VCVTu16f16z:
+ case MVE_VCVTu32f32_fix:
+ case MVE_VCVTu32f32a:
+ case MVE_VCVTu32f32m:
+ case MVE_VCVTu32f32n:
+ case MVE_VCVTu32f32p:
+ case MVE_VCVTu32f32z:
+ case MVE_VDDUPu16:
+ case MVE_VDDUPu32:
+ case MVE_VDDUPu8:
+ case MVE_VDUP16:
+ case MVE_VDUP32:
+ case MVE_VDUP8:
+ case MVE_VDWDUPu16:
+ case MVE_VDWDUPu32:
+ case MVE_VDWDUPu8:
+ case MVE_VEOR:
+ case MVE_VFMA_qr_Sf16:
+ case MVE_VFMA_qr_Sf32:
+ case MVE_VFMA_qr_f16:
+ case MVE_VFMA_qr_f32:
+ case MVE_VMLAS_qr_s16:
+ case MVE_VMLAS_qr_s32:
+ case MVE_VMLAS_qr_s8:
+ case MVE_VMLAS_qr_u16:
+ case MVE_VMLAS_qr_u32:
+ case MVE_VMLAS_qr_u8:
+ case MVE_VMLA_qr_s16:
+ case MVE_VMLA_qr_s32:
+ case MVE_VMLA_qr_s8:
+ case MVE_VMLA_qr_u16:
+ case MVE_VMLA_qr_u32:
+ case MVE_VMLA_qr_u8:
+ case MVE_VHADD_qr_s16:
+ case MVE_VHADD_qr_s32:
+ case MVE_VHADD_qr_s8:
+ case MVE_VHADD_qr_u16:
+ case MVE_VHADD_qr_u32:
+ case MVE_VHADD_qr_u8:
+ case MVE_VHADDs16:
+ case MVE_VHADDs32:
+ case MVE_VHADDs8:
+ case MVE_VHADDu16:
+ case MVE_VHADDu32:
+ case MVE_VHADDu8:
+ case MVE_VHSUB_qr_s16:
+ case MVE_VHSUB_qr_s32:
+ case MVE_VHSUB_qr_s8:
+ case MVE_VHSUB_qr_u16:
+ case MVE_VHSUB_qr_u32:
+ case MVE_VHSUB_qr_u8:
+ case MVE_VHSUBs16:
+ case MVE_VHSUBs32:
+ case MVE_VHSUBs8:
+ case MVE_VHSUBu16:
+ case MVE_VHSUBu32:
+ case MVE_VHSUBu8:
+ case MVE_VIDUPu16:
+ case MVE_VIDUPu32:
+ case MVE_VIDUPu8:
+ case MVE_VIWDUPu16:
+ case MVE_VIWDUPu32:
+ case MVE_VIWDUPu8:
+ case MVE_VLDRBS16:
+ case MVE_VLDRBS16_post:
+ case MVE_VLDRBS16_pre:
+ case MVE_VLDRBS16_rq:
+ case MVE_VLDRBS32:
+ case MVE_VLDRBS32_post:
+ case MVE_VLDRBS32_pre:
+ case MVE_VLDRBS32_rq:
+ case MVE_VLDRBU16:
+ case MVE_VLDRBU16_post:
+ case MVE_VLDRBU16_pre:
+ case MVE_VLDRBU16_rq:
+ case MVE_VLDRBU32:
+ case MVE_VLDRBU32_post:
+ case MVE_VLDRBU32_pre:
+ case MVE_VLDRBU32_rq:
+ case MVE_VLDRBU8:
+ case MVE_VLDRBU8_post:
+ case MVE_VLDRBU8_pre:
+ case MVE_VLDRBU8_rq:
+ case MVE_VLDRDU64_qi:
+ case MVE_VLDRDU64_qi_pre:
+ case MVE_VLDRDU64_rq:
+ case MVE_VLDRDU64_rq_u:
+ case MVE_VLDRHS32:
+ case MVE_VLDRHS32_post:
+ case MVE_VLDRHS32_pre:
+ case MVE_VLDRHS32_rq:
+ case MVE_VLDRHS32_rq_u:
+ case MVE_VLDRHU16:
+ case MVE_VLDRHU16_post:
+ case MVE_VLDRHU16_pre:
+ case MVE_VLDRHU16_rq:
+ case MVE_VLDRHU16_rq_u:
+ case MVE_VLDRHU32:
+ case MVE_VLDRHU32_post:
+ case MVE_VLDRHU32_pre:
+ case MVE_VLDRHU32_rq:
+ case MVE_VLDRHU32_rq_u:
+ case MVE_VLDRWU32:
+ case MVE_VLDRWU32_post:
+ case MVE_VLDRWU32_pre:
+ case MVE_VLDRWU32_qi:
+ case MVE_VLDRWU32_qi_pre:
+ case MVE_VLDRWU32_rq:
+ case MVE_VLDRWU32_rq_u:
+ case MVE_VMOVimmf32:
+ case MVE_VMOVimmi16:
+ case MVE_VMOVimmi32:
+ case MVE_VMOVimmi64:
+ case MVE_VMOVimmi8:
+ case MVE_VMUL_qr_f16:
+ case MVE_VMUL_qr_f32:
+ case MVE_VMUL_qr_i16:
+ case MVE_VMUL_qr_i32:
+ case MVE_VMUL_qr_i8:
+ case MVE_VMULf16:
+ case MVE_VMULf32:
+ case MVE_VMVN:
+ case MVE_VMVNimmi16:
+ case MVE_VMVNimmi32:
+ case MVE_VNEGf16:
+ case MVE_VNEGf32:
+ case MVE_VNEGs16:
+ case MVE_VNEGs32:
+ case MVE_VNEGs8:
+ case MVE_VORN:
+ case MVE_VORR:
+ case MVE_VORRIZ0v4i32:
+ case MVE_VORRIZ0v8i16:
+ case MVE_VORRIZ16v4i32:
+ case MVE_VORRIZ24v4i32:
+ case MVE_VORRIZ8v4i32:
+ case MVE_VORRIZ8v8i16:
+ case MVE_VPNOT:
+ case MVE_VPSEL:
+ case MVE_VPST:
+ case MVE_VPTv16i8:
+ case MVE_VPTv16i8r:
+ case MVE_VPTv16s8:
+ case MVE_VPTv16s8r:
+ case MVE_VPTv16u8:
+ case MVE_VPTv16u8r:
+ case MVE_VPTv4f32:
+ case MVE_VPTv4f32r:
+ case MVE_VPTv4i32:
+ case MVE_VPTv4i32r:
+ case MVE_VPTv4s32:
+ case MVE_VPTv4s32r:
+ case MVE_VPTv4u32:
+ case MVE_VPTv4u32r:
+ case MVE_VPTv8f16:
+ case MVE_VPTv8f16r:
+ case MVE_VPTv8i16:
+ case MVE_VPTv8i16r:
+ case MVE_VPTv8s16:
+ case MVE_VPTv8s16r:
+ case MVE_VPTv8u16:
+ case MVE_VPTv8u16r:
+ case MVE_VQABSs16:
+ case MVE_VQABSs32:
+ case MVE_VQABSs8:
+ case MVE_VQADD_qr_s16:
+ case MVE_VQADD_qr_s32:
+ case MVE_VQADD_qr_s8:
+ case MVE_VQADD_qr_u16:
+ case MVE_VQADD_qr_u32:
+ case MVE_VQADD_qr_u8:
+ case MVE_VQADDs16:
+ case MVE_VQADDs32:
+ case MVE_VQADDs8:
+ case MVE_VQADDu16:
+ case MVE_VQADDu32:
+ case MVE_VQADDu8:
+ case MVE_VQNEGs16:
+ case MVE_VQNEGs32:
+ case MVE_VQNEGs8:
+ case MVE_VQRSHL_by_vecs16:
+ case MVE_VQRSHL_by_vecs32:
+ case MVE_VQRSHL_by_vecs8:
+ case MVE_VQRSHL_by_vecu16:
+ case MVE_VQRSHL_by_vecu32:
+ case MVE_VQRSHL_by_vecu8:
+ case MVE_VQRSHL_qrs16:
+ case MVE_VQRSHL_qrs32:
+ case MVE_VQRSHL_qrs8:
+ case MVE_VQRSHL_qru16:
+ case MVE_VQRSHL_qru8:
+ case MVE_VQRSHL_qru32:
+ case MVE_VQSHLU_imms16:
+ case MVE_VQSHLU_imms32:
+ case MVE_VQSHLU_imms8:
+ case MVE_VQSHL_by_vecs16:
+ case MVE_VQSHL_by_vecs32:
+ case MVE_VQSHL_by_vecs8:
+ case MVE_VQSHL_by_vecu16:
+ case MVE_VQSHL_by_vecu32:
+ case MVE_VQSHL_by_vecu8:
+ case MVE_VQSHL_qrs16:
+ case MVE_VQSHL_qrs32:
+ case MVE_VQSHL_qrs8:
+ case MVE_VQSHL_qru16:
+ case MVE_VQSHL_qru32:
+ case MVE_VQSHL_qru8:
+ case MVE_VQSUB_qr_s16:
+ case MVE_VQSUB_qr_s32:
+ case MVE_VQSUB_qr_s8:
+ case MVE_VQSUB_qr_u16:
+ case MVE_VQSUB_qr_u32:
+ case MVE_VQSUB_qr_u8:
+ case MVE_VQSUBs16:
+ case MVE_VQSUBs32:
+ case MVE_VQSUBs8:
+ case MVE_VQSUBu16:
+ case MVE_VQSUBu32:
+ case MVE_VQSUBu8:
+ case MVE_VRHADDs16:
+ case MVE_VRHADDs32:
+ case MVE_VRHADDs8:
+ case MVE_VRHADDu16:
+ case MVE_VRHADDu32:
+ case MVE_VRHADDu8:
+ case MVE_VRINTf16A:
+ case MVE_VRINTf16M:
+ case MVE_VRINTf16N:
+ case MVE_VRINTf16P:
+ case MVE_VRINTf16X:
+ case MVE_VRINTf16Z:
+ case MVE_VRINTf32A:
+ case MVE_VRINTf32M:
+ case MVE_VRINTf32N:
+ case MVE_VRINTf32P:
+ case MVE_VRINTf32X:
+ case MVE_VRINTf32Z:
+ case MVE_VRSHL_by_vecs16:
+ case MVE_VRSHL_by_vecs32:
+ case MVE_VRSHL_by_vecs8:
+ case MVE_VRSHL_by_vecu16:
+ case MVE_VRSHL_by_vecu32:
+ case MVE_VRSHL_by_vecu8:
+ case MVE_VRSHL_qrs16:
+ case MVE_VRSHL_qrs32:
+ case MVE_VRSHL_qrs8:
+ case MVE_VRSHL_qru16:
+ case MVE_VRSHL_qru32:
+ case MVE_VRSHL_qru8:
+ case MVE_VRSHR_imms16:
+ case MVE_VRSHR_imms32:
+ case MVE_VRSHR_imms8:
+ case MVE_VRSHR_immu16:
+ case MVE_VRSHR_immu32:
+ case MVE_VRSHR_immu8:
+ case MVE_VSHL_by_vecs16:
+ case MVE_VSHL_by_vecs32:
+ case MVE_VSHL_by_vecs8:
+ case MVE_VSHL_by_vecu16:
+ case MVE_VSHL_by_vecu32:
+ case MVE_VSHL_by_vecu8:
+ case MVE_VSHL_immi16:
+ case MVE_VSHL_immi32:
+ case MVE_VSHL_immi8:
+ case MVE_VSHL_qrs16:
+ case MVE_VSHL_qrs32:
+ case MVE_VSHL_qrs8:
+ case MVE_VSHL_qru16:
+ case MVE_VSHL_qru32:
+ case MVE_VSHL_qru8:
+ case MVE_VSHR_imms16:
+ case MVE_VSHR_imms32:
+ case MVE_VSHR_imms8:
+ case MVE_VSHR_immu16:
+ case MVE_VSHR_immu32:
+ case MVE_VSHR_immu8:
+ case MVE_VSLIimm16:
+ case MVE_VSLIimm32:
+ case MVE_VSLIimm8:
+ case MVE_VSLIimms16:
+ case MVE_VSLIimms32:
+ case MVE_VSLIimms8:
+ case MVE_VSLIimmu16:
+ case MVE_VSLIimmu32:
+ case MVE_VSLIimmu8:
+ case MVE_VSRIimm16:
+ case MVE_VSRIimm32:
+ case MVE_VSRIimm8:
+ case MVE_VSTRB16:
+ case MVE_VSTRB16_post:
+ case MVE_VSTRB16_pre:
+ case MVE_VSTRB16_rq:
+ case MVE_VSTRB32:
+ case MVE_VSTRB32_post:
+ case MVE_VSTRB32_pre:
+ case MVE_VSTRB32_rq:
+ case MVE_VSTRB8_rq:
+ case MVE_VSTRBU8:
+ case MVE_VSTRBU8_post:
+ case MVE_VSTRBU8_pre:
+ case MVE_VSTRD64_qi:
+ case MVE_VSTRD64_qi_pre:
+ case MVE_VSTRD64_rq:
+ case MVE_VSTRD64_rq_u:
+ case MVE_VSTRH16_rq:
+ case MVE_VSTRH16_rq_u:
+ case MVE_VSTRH32:
+ case MVE_VSTRH32_post:
+ case MVE_VSTRH32_pre:
+ case MVE_VSTRH32_rq:
+ case MVE_VSTRH32_rq_u:
+ case MVE_VSTRHU16:
+ case MVE_VSTRHU16_post:
+ case MVE_VSTRHU16_pre:
+ case MVE_VSTRW32_qi:
+ case MVE_VSTRW32_qi_pre:
+ case MVE_VSTRW32_rq:
+ case MVE_VSTRW32_rq_u:
+ case MVE_VSTRWU32:
+ case MVE_VSTRWU32_post:
+ case MVE_VSTRWU32_pre:
+ case MVE_VSUB_qr_f16:
+ case MVE_VSUB_qr_f32:
+ case MVE_VSUB_qr_i16:
+ case MVE_VSUB_qr_i32:
+ case MVE_VSUB_qr_i8:
+ case MVE_VSUBf16:
+ case MVE_VSUBf32:
+ case MVE_VSUBi16:
+ case MVE_VSUBi32:
+ case MVE_VSUBi8:
+ return true;
+ }
+ };
+
LLVMInitializeARMTargetInfo();
LLVMInitializeARMTarget();
LLVMInitializeARMTargetMC();
@@ -36,131 +481,27 @@ TEST(MachineInstrInvalidTailPredication,
static_cast<LLVMTargetMachine*>(
T->createTargetMachine(TT, "generic", "", Options, None, None,
CodeGenOpt::Default)));
+ ARMSubtarget ST(TM->getTargetTriple(), TM->getTargetCPU(),
+ TM->getTargetFeatureString(),
+ *static_cast<const ARMBaseTargetMachine*>(TM.get()), false);
+ const ARMBaseInstrInfo *TII = ST.getInstrInfo();
auto MII = TM->getMCInstrInfo();
- using namespace ARM;
+ for (unsigned i = 0; i < ARM::INSTRUCTION_LIST_END; ++i) {
+ const MCInstrDesc &Desc = TII->get(i);
- auto IsInvalidTPOpcode = [](unsigned Opcode) {
- switch (Opcode) {
- case MVE_VABAVs8:
- case MVE_VABAVs16:
- case MVE_VABAVs32:
- case MVE_VABAVu8:
- case MVE_VABAVu16:
- case MVE_VABAVu32:
- case MVE_VADDVs8acc:
- case MVE_VADDVs16acc:
- case MVE_VADDVs32acc:
- case MVE_VADDVu8acc:
- case MVE_VADDVu16acc:
- case MVE_VADDVu32acc:
- case MVE_VADDVs8no_acc:
- case MVE_VADDVs16no_acc:
- case MVE_VADDVs32no_acc:
- case MVE_VADDVu8no_acc:
- case MVE_VADDVu16no_acc:
- case MVE_VADDVu32no_acc:
- case MVE_VADDLVs32no_acc:
- case MVE_VADDLVu32no_acc:
- case MVE_VADDLVs32acc:
- case MVE_VADDLVu32acc:
- case MVE_VMLADAVas16:
- case MVE_VMLADAVas32:
- case MVE_VMLADAVas8:
- case MVE_VMLADAVau16:
- case MVE_VMLADAVau32:
- case MVE_VMLADAVau8:
- case MVE_VMLADAVaxs16:
- case MVE_VMLADAVaxs32:
- case MVE_VMLADAVaxs8:
- case MVE_VMLADAVs16:
- case MVE_VMLADAVs32:
- case MVE_VMLADAVs8:
- case MVE_VMLADAVu16:
- case MVE_VMLADAVu32:
- case MVE_VMLADAVu8:
- case MVE_VMLADAVxs16:
- case MVE_VMLADAVxs32:
- case MVE_VMLADAVxs8:
- case MVE_VMLALDAVas16:
- case MVE_VMLALDAVas32:
- case MVE_VMLALDAVau16:
- case MVE_VMLALDAVau32:
- case MVE_VMLALDAVaxs16:
- case MVE_VMLALDAVaxs32:
- case MVE_VMLALDAVs16:
- case MVE_VMLALDAVs32:
- case MVE_VMLALDAVu16:
- case MVE_VMLALDAVu32:
- case MVE_VMLALDAVxs16:
- case MVE_VMLALDAVxs32:
- case MVE_VMLSDAVas16:
- case MVE_VMLSDAVas32:
- case MVE_VMLSDAVas8:
- case MVE_VMLSDAVaxs16:
- case MVE_VMLSDAVaxs32:
- case MVE_VMLSDAVaxs8:
- case MVE_VMLSDAVs16:
- case MVE_VMLSDAVs32:
- case MVE_VMLSDAVs8:
- case MVE_VMLSDAVxs16:
- case MVE_VMLSDAVxs32:
- case MVE_VMLSDAVxs8:
- case MVE_VMLSLDAVas16:
- case MVE_VMLSLDAVas32:
- case MVE_VMLSLDAVaxs16:
- case MVE_VMLSLDAVaxs32:
- case MVE_VMLSLDAVs16:
- case MVE_VMLSLDAVs32:
- case MVE_VMLSLDAVxs16:
- case MVE_VMLSLDAVxs32:
- case MVE_VRMLALDAVHas32:
- case MVE_VRMLALDAVHau32:
- case MVE_VRMLALDAVHaxs32:
- case MVE_VRMLALDAVHs32:
- case MVE_VRMLALDAVHu32:
- case MVE_VRMLALDAVHxs32:
- case MVE_VRMLSLDAVHas32:
- case MVE_VRMLSLDAVHaxs32:
- case MVE_VRMLSLDAVHs32:
- case MVE_VRMLSLDAVHxs32:
- case MVE_VMAXNMVf16:
- case MVE_VMINNMVf16:
- case MVE_VMAXNMVf32:
- case MVE_VMINNMVf32:
- case MVE_VMAXNMAVf16:
- case MVE_VMINNMAVf16:
- case MVE_VMAXNMAVf32:
- case MVE_VMINNMAVf32:
- case MVE_VMAXVs8:
- case MVE_VMAXVs16:
- case MVE_VMAXVs32:
- case MVE_VMAXVu8:
- case MVE_VMAXVu16:
- case MVE_VMAXVu32:
- case MVE_VMINVs8:
- case MVE_VMINVs16:
- case MVE_VMINVs32:
- case MVE_VMINVu8:
- case MVE_VMINVu16:
- case MVE_VMINVu32:
- case MVE_VMAXAVs8:
- case MVE_VMAXAVs16:
- case MVE_VMAXAVs32:
- case MVE_VMINAVs8:
- case MVE_VMINAVs16:
- case MVE_VMINAVs32:
- return true;
- default:
- return false;
- }
- };
+ for (auto &Op : Desc.operands()) {
+ // Only check instructions that access the MQPR regs.
+ if ((Op.OperandType & MCOI::OPERAND_REGISTER) == 0 ||
+ Op.RegClass != ARM::MQPRRegClassID)
+ continue;
- for (unsigned i = 0; i < ARM::INSTRUCTION_LIST_END; ++i) {
- uint64_t Flags = MII->get(i).TSFlags;
- bool Invalid = (Flags & ARMII::InvalidForTailPredication) != 0;
- ASSERT_EQ(IsInvalidTPOpcode(i), Invalid)
- << MII->getName(i)
- << ": mismatched expectation for tail-predicated safety\n";
+ uint64_t Flags = MII->get(i).TSFlags;
+ bool Valid = (Flags & ARMII::ValidForTailPredication) != 0;
+ ASSERT_EQ(IsValidTPOpcode(i), Valid)
+ << MII->getName(i)
+ << ": mismatched expectation for tail-predicated safety\n";
+ break;
+ }
}
}
More information about the llvm-commits
mailing list