[llvm] f6a5699 - [AMDGPU][TableGen] Make more use of !ne !not !and !or. NFC.
Jay Foad via llvm-commits
llvm-commits at lists.llvm.org
Wed Oct 21 01:57:00 PDT 2020
Author: Jay Foad
Date: 2020-10-21T09:56:43+01:00
New Revision: f6a5699c6cb5df03d9e50c17fd47edab3fefd6bf
URL: https://github.com/llvm/llvm-project/commit/f6a5699c6cb5df03d9e50c17fd47edab3fefd6bf
DIFF: https://github.com/llvm/llvm-project/commit/f6a5699c6cb5df03d9e50c17fd47edab3fefd6bf.diff
LOG: [AMDGPU][TableGen] Make more use of !ne !not !and !or. NFC.
Added:
Modified:
llvm/lib/Target/AMDGPU/BUFInstructions.td
llvm/lib/Target/AMDGPU/DSInstructions.td
llvm/lib/Target/AMDGPU/FLATInstructions.td
llvm/lib/Target/AMDGPU/MIMGInstructions.td
llvm/lib/Target/AMDGPU/SIInstrFormats.td
llvm/lib/Target/AMDGPU/SIInstrInfo.td
llvm/lib/Target/AMDGPU/SIRegisterInfo.td
llvm/lib/Target/AMDGPU/VOP3Instructions.td
llvm/lib/Target/AMDGPU/VOP3PInstructions.td
llvm/lib/Target/AMDGPU/VOPInstructions.td
Removed:
################################################################################
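For context on the idiom being cleaned up: TableGen's !eq/!ne comparisons already evaluate to a bit, and !not/!and/!or combine bits directly, so wrapping comparisons in !if(..., 1, 0) chains is redundant. As the NFC tag indicates, the emitted records are unchanged; only the TableGen source is simplified. A minimal illustrative sketch of the rewrite pattern follows (the class and field names here are made up for illustration, not taken from the patch):

    // Illustrative only -- hypothetical class showing the before/after idioms.
    class BoolIdioms<int addrKind, bit isLds> {
      // Before: !if(!eq(addrKind, 4), 1, 0)
      bits<1> addr64    = !eq(addrKind, 4);    // the comparison is already a bit
      // Before: !if(!eq(addrKind, 0), 0, 1)
      bits<1> has_vaddr = !ne(addrKind, 0);
      // Before: !if(isLds, 0, 1)
      bit has_tfe = !not(isLds);
      // Before: !if(!eq(addrKind, 1), 1, !if(!eq(addrKind, 2), 1, 0))
      bit offen   = !or(!eq(addrKind, 1), !eq(addrKind, 2));
    }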
diff --git a/llvm/lib/Target/AMDGPU/BUFInstructions.td b/llvm/lib/Target/AMDGPU/BUFInstructions.td
index e1c9f1609a02..763b2f0ef80e 100644
--- a/llvm/lib/Target/AMDGPU/BUFInstructions.td
+++ b/llvm/lib/Target/AMDGPU/BUFInstructions.td
@@ -183,15 +183,15 @@ class getMTBUFAsmOps<int addrKind> {
}
class MTBUF_SetupAddr<int addrKind> {
- bits<1> offen = !if(!eq(addrKind, BUFAddrKind.OffEn), 1,
- !if(!eq(addrKind, BUFAddrKind.BothEn), 1 , 0));
+ bits<1> offen = !or(!eq(addrKind, BUFAddrKind.OffEn),
+ !eq(addrKind, BUFAddrKind.BothEn));
- bits<1> idxen = !if(!eq(addrKind, BUFAddrKind.IdxEn), 1,
- !if(!eq(addrKind, BUFAddrKind.BothEn), 1 , 0));
+ bits<1> idxen = !or(!eq(addrKind, BUFAddrKind.IdxEn),
+ !eq(addrKind, BUFAddrKind.BothEn));
- bits<1> addr64 = !if(!eq(addrKind, BUFAddrKind.Addr64), 1, 0);
+ bits<1> addr64 = !eq(addrKind, BUFAddrKind.Addr64);
- bits<1> has_vaddr = !if(!eq(addrKind, BUFAddrKind.Offset), 0, 1);
+ bits<1> has_vaddr = !ne(addrKind, BUFAddrKind.Offset);
}
class MTBUF_Load_Pseudo <string opName,
@@ -462,15 +462,15 @@ class getMUBUFAsmOps<int addrKind> {
}
class MUBUF_SetupAddr<int addrKind> {
- bits<1> offen = !if(!eq(addrKind, BUFAddrKind.OffEn), 1,
- !if(!eq(addrKind, BUFAddrKind.BothEn), 1 , 0));
+ bits<1> offen = !or(!eq(addrKind, BUFAddrKind.OffEn),
+ !eq(addrKind, BUFAddrKind.BothEn));
- bits<1> idxen = !if(!eq(addrKind, BUFAddrKind.IdxEn), 1,
- !if(!eq(addrKind, BUFAddrKind.BothEn), 1 , 0));
+ bits<1> idxen = !or(!eq(addrKind, BUFAddrKind.IdxEn),
+ !eq(addrKind, BUFAddrKind.BothEn));
- bits<1> addr64 = !if(!eq(addrKind, BUFAddrKind.Addr64), 1, 0);
+ bits<1> addr64 = !eq(addrKind, BUFAddrKind.Addr64);
- bits<1> has_vaddr = !if(!eq(addrKind, BUFAddrKind.Offset), 0, 1);
+ bits<1> has_vaddr = !ne(addrKind, BUFAddrKind.Offset);
}
class MUBUF_Load_Pseudo <string opName,
@@ -498,7 +498,7 @@ class MUBUF_Load_Pseudo <string opName,
let mayStore = 0;
let maybeAtomic = 1;
let Uses = !if(isLds, [EXEC, M0], [EXEC]);
- let has_tfe = !if(isLds, 0, 1);
+ let has_tfe = !not(isLds);
let lds = isLds;
let elements = getMUBUFElements<vdata_vt>.ret;
}
@@ -1857,7 +1857,7 @@ class Base_MUBUF_Real_gfx6_gfx7_gfx10<bits<7> op, MUBUF_Pseudo ps, int ef> :
let Inst{12} = ps.offen;
let Inst{13} = ps.idxen;
let Inst{14} = !if(ps.has_glc, glc, ps.glc_value);
- let Inst{16} = !if(ps.lds, 1, 0);
+ let Inst{16} = ps.lds;
let Inst{24-18} = op;
let Inst{31-26} = 0x38;
let Inst{39-32} = !if(ps.has_vaddr, vaddr, ?);
@@ -2208,7 +2208,7 @@ class MUBUF_Real_vi <bits<7> op, MUBUF_Pseudo ps> :
let Inst{12} = ps.offen;
let Inst{13} = ps.idxen;
let Inst{14} = !if(ps.has_glc, glc, ps.glc_value);
- let Inst{16} = !if(ps.lds, 1, 0);
+ let Inst{16} = ps.lds;
let Inst{17} = !if(ps.has_slc, slc, ?);
let Inst{24-18} = op;
let Inst{31-26} = 0x38; //encoding
@@ -2258,7 +2258,7 @@ class MUBUF_Real_gfx80 <bits<7> op, MUBUF_Pseudo ps> :
let Inst{12} = ps.offen;
let Inst{13} = ps.idxen;
let Inst{14} = !if(ps.has_glc, glc, ps.glc_value);
- let Inst{16} = !if(ps.lds, 1, 0);
+ let Inst{16} = ps.lds;
let Inst{17} = !if(ps.has_slc, slc, ?);
let Inst{24-18} = op;
let Inst{31-26} = 0x38; //encoding
diff --git a/llvm/lib/Target/AMDGPU/DSInstructions.td b/llvm/lib/Target/AMDGPU/DSInstructions.td
index 96345d07c95d..6f6cd2364703 100644
--- a/llvm/lib/Target/AMDGPU/DSInstructions.td
+++ b/llvm/lib/Target/AMDGPU/DSInstructions.td
@@ -166,12 +166,12 @@ class DS_1A1D_RET <string opName, RegisterClass rc = VGPR_32>
multiclass DS_1A1D_RET_mc <string opName, RegisterClass rc = VGPR_32,
string NoRetOp = ""> {
def "" : DS_1A1D_RET<opName, rc>,
- AtomicNoRet<NoRetOp, !if(!eq(NoRetOp, ""), 0, 1)>;
+ AtomicNoRet<NoRetOp, !ne(NoRetOp, "")>;
let has_m0_read = 0 in {
def _gfx9 : DS_1A1D_RET<opName, rc>,
AtomicNoRet<!if(!eq(NoRetOp, ""), "", NoRetOp#"_gfx9"),
- !if(!eq(NoRetOp, ""), 0, 1)>;
+ !ne(NoRetOp, "")>;
}
}
@@ -191,11 +191,11 @@ multiclass DS_1A2D_RET_mc<string opName,
string NoRetOp = "",
RegisterClass src = rc> {
def "" : DS_1A2D_RET<opName, rc, src>,
- AtomicNoRet<NoRetOp, !if(!eq(NoRetOp, ""), 0, 1)>;
+ AtomicNoRet<NoRetOp, !ne(NoRetOp, "")>;
let has_m0_read = 0 in {
def _gfx9 : DS_1A2D_RET<opName, rc, src>,
- AtomicNoRet<NoRetOp#"_gfx9", !if(!eq(NoRetOp, ""), 0, 1)>;
+ AtomicNoRet<NoRetOp#"_gfx9", !ne(NoRetOp, "")>;
}
}
diff --git a/llvm/lib/Target/AMDGPU/FLATInstructions.td b/llvm/lib/Target/AMDGPU/FLATInstructions.td
index e7f6b0ca3b55..29a350d5d2a3 100644
--- a/llvm/lib/Target/AMDGPU/FLATInstructions.td
+++ b/llvm/lib/Target/AMDGPU/FLATInstructions.td
@@ -63,9 +63,9 @@ class FLAT_Pseudo<string opName, dag outs, dag ins,
// Buffer instruction; so, they increment both VM_CNT and LGKM_CNT
// and are not considered done until both have been decremented.
let VM_CNT = 1;
- let LGKM_CNT = !if(!or(is_flat_global, is_flat_scratch), 0, 1);
+ let LGKM_CNT = !not(!or(is_flat_global, is_flat_scratch));
- let IsNonFlatSeg = !if(!or(is_flat_global, is_flat_scratch), 1, 0);
+ let IsNonFlatSeg = !or(is_flat_global, is_flat_scratch);
}
class FLAT_Real <bits<7> op, FLAT_Pseudo ps> :
diff --git a/llvm/lib/Target/AMDGPU/MIMGInstructions.td b/llvm/lib/Target/AMDGPU/MIMGInstructions.td
index f56b8728e64c..e9ee87283553 100644
--- a/llvm/lib/Target/AMDGPU/MIMGInstructions.td
+++ b/llvm/lib/Target/AMDGPU/MIMGInstructions.td
@@ -148,7 +148,7 @@ class MIMG_Base <dag outs, string dns = "">
let hasSideEffects = 0; // XXX ????
let DecoderNamespace = dns;
- let isAsmParserOnly = !if(!eq(dns,""), 1, 0);
+ let isAsmParserOnly = !eq(dns, "");
}
class MIMG <dag outs, string dns = "">
@@ -308,13 +308,13 @@ multiclass MIMG_NoSampler_Src_Helper <bits<8> op, string asm,
multiclass MIMG_NoSampler <bits<8> op, string asm, bit has_d16, bit mip = 0,
bit isResInfo = 0> {
def "" : MIMGBaseOpcode {
- let Coordinates = !if(isResInfo, 0, 1);
+ let Coordinates = !not(isResInfo);
let LodOrClampOrMip = mip;
let HasD16 = has_d16;
}
let BaseOpcode = !cast<MIMGBaseOpcode>(NAME),
- mayLoad = !if(isResInfo, 0, 1) in {
+ mayLoad = !not(isResInfo) in {
let VDataDwords = 1 in
defm _V1 : MIMG_NoSampler_Src_Helper <op, asm, VGPR_32, 1>;
let VDataDwords = 2 in
@@ -665,12 +665,12 @@ multiclass MIMG_Sampler <bits<8> op, AMDGPUSampleVariant sample, bit wqm = 0,
bit isG16 = 0, bit isGetLod = 0,
string asm = "image_sample"#sample.LowerCaseMod#!if(isG16, "_g16", "")> {
def "" : MIMG_Sampler_BaseOpcode<sample> {
- let HasD16 = !if(isGetLod, 0, 1);
+ let HasD16 = !not(isGetLod);
let G16 = isG16;
}
let BaseOpcode = !cast<MIMGBaseOpcode>(NAME), WQM = wqm,
- mayLoad = !if(isGetLod, 0, 1) in {
+ mayLoad = !not(isGetLod) in {
let VDataDwords = 1 in
defm _V1 : MIMG_Sampler_Src_Helper<op, asm, sample, VGPR_32, 1>;
let VDataDwords = 2 in
@@ -712,8 +712,8 @@ class MIMG_IntersectRay_gfx10<int op, string opcode, RegisterClass AddrRC, bit A
: MIMG_gfx10<op, (outs VReg_128:$vdata), "AMDGPU"> {
let InOperandList = !con((ins AddrRC:$vaddr0, SReg_128:$srsrc),
- !if(!eq(A16,1), (ins GFX10A16:$a16), (ins)));
- let AsmString = opcode#" $vdata, $vaddr0, $srsrc"#!if(!eq(A16,1), "$a16", "");
+ !if(A16, (ins GFX10A16:$a16), (ins)));
+ let AsmString = opcode#" $vdata, $vaddr0, $srsrc"#!if(A16, "$a16", "");
let nsa = 0;
}
@@ -722,15 +722,15 @@ class MIMG_IntersectRay_nsa_gfx10<int op, string opcode, int num_addrs, bit A16>
: MIMG_nsa_gfx10<op, (outs VReg_128:$vdata), num_addrs, "AMDGPU"> {
let InOperandList = !con(nsah.AddrIns,
(ins SReg_128:$srsrc),
- !if(!eq(A16,1), (ins GFX10A16:$a16), (ins)));
- let AsmString = opcode#" $vdata, "#nsah.AddrAsm#", $srsrc"#!if(!eq(A16,1), "$a16", "");
+ !if(A16, (ins GFX10A16:$a16), (ins)));
+ let AsmString = opcode#" $vdata, "#nsah.AddrAsm#", $srsrc"#!if(A16, "$a16", "");
}
multiclass MIMG_IntersectRay<int op, string opcode, int num_addrs, bit A16> {
def "" : MIMGBaseOpcode;
let SubtargetPredicate = HasGFX10_BEncoding,
AssemblerPredicate = HasGFX10_BEncoding,
- AsmMatchConverter = !if(!eq(A16,1), "cvtIntersectRay", ""),
+ AsmMatchConverter = !if(A16, "cvtIntersectRay", ""),
dmask = 0xf,
unorm = 1,
d16 = 0,
diff --git a/llvm/lib/Target/AMDGPU/SIInstrFormats.td b/llvm/lib/Target/AMDGPU/SIInstrFormats.td
index 428c21c896d5..ca1cfc65c94a 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrFormats.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrFormats.td
@@ -203,11 +203,11 @@ class InstSI <dag outs, dag ins, string asm = "",
field bits<1> DisableVIDecoder = 0;
field bits<1> DisableDecoder = 0;
- let isAsmParserOnly = !if(!eq(DisableDecoder{0}, {0}), 0, 1);
+ let isAsmParserOnly = !ne(DisableDecoder{0}, {0});
let AsmVariantName = AMDGPUAsmVariants.Default;
// Avoid changing source registers in a way that violates constant bus read limitations.
- let hasExtraSrcRegAllocReq = !if(VOP1,1,!if(VOP2,1,!if(VOP3,1,!if(VOPC,1,!if(SDWA,1, !if(VALU,1,0))))));
+ let hasExtraSrcRegAllocReq = !or(VOP1, VOP2, VOP3, VOPC, SDWA, VALU);
}
class PseudoInstSI<dag outs, dag ins, list<dag> pattern = [], string asm = "">
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index 1aa4be8aa286..86c54efe3480 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -259,31 +259,25 @@ def SIdenorm_mode : SDNode<"AMDGPUISD::DENORM_MODE",
// Returns 1 if the source arguments have modifiers, 0 if they do not.
// XXX - do f16 instructions?
class isFloatType<ValueType SrcVT> {
- bit ret =
- !if(!eq(SrcVT.Value, f16.Value), 1,
- !if(!eq(SrcVT.Value, f32.Value), 1,
- !if(!eq(SrcVT.Value, f64.Value), 1,
- !if(!eq(SrcVT.Value, v2f16.Value), 1,
- !if(!eq(SrcVT.Value, v4f16.Value), 1,
- !if(!eq(SrcVT.Value, v2f32.Value), 1,
- !if(!eq(SrcVT.Value, v2f64.Value), 1,
- 0)))))));
+ bit ret = !or(!eq(SrcVT.Value, f16.Value),
+ !eq(SrcVT.Value, f32.Value),
+ !eq(SrcVT.Value, f64.Value),
+ !eq(SrcVT.Value, v2f16.Value),
+ !eq(SrcVT.Value, v4f16.Value),
+ !eq(SrcVT.Value, v2f32.Value),
+ !eq(SrcVT.Value, v2f64.Value));
}
class isIntType<ValueType SrcVT> {
- bit ret =
- !if(!eq(SrcVT.Value, i16.Value), 1,
- !if(!eq(SrcVT.Value, i32.Value), 1,
- !if(!eq(SrcVT.Value, i64.Value), 1,
- 0)));
+ bit ret = !or(!eq(SrcVT.Value, i16.Value),
+ !eq(SrcVT.Value, i32.Value),
+ !eq(SrcVT.Value, i64.Value));
}
class isPackedType<ValueType SrcVT> {
- bit ret =
- !if(!eq(SrcVT.Value, v2i16.Value), 1,
- !if(!eq(SrcVT.Value, v2f16.Value), 1,
- !if(!eq(SrcVT.Value, v4f16.Value), 1, 0)
- ));
+ bit ret = !or(!eq(SrcVT.Value, v2i16.Value),
+ !eq(SrcVT.Value, v2f16.Value),
+ !eq(SrcVT.Value, v4f16.Value));
}
//===----------------------------------------------------------------------===//
@@ -1393,8 +1387,8 @@ def HWREG {
class getHwRegImm<int Reg, int Offset = 0, int Size = 32> {
int ret = !and(!or(Reg,
- !or(!shl(Offset, 6),
- !shl(!add(Size, -1), 11))), 65535);
+ !shl(Offset, 6),
+ !shl(!add(Size, -1), 11)), 65535);
}
//===----------------------------------------------------------------------===//
@@ -1590,13 +1584,11 @@ class getVOP3SrcForVT<ValueType VT> {
// Float or packed int
class isModifierType<ValueType SrcVT> {
- bit ret =
- !if(!eq(SrcVT.Value, f16.Value), 1,
- !if(!eq(SrcVT.Value, f32.Value), 1,
- !if(!eq(SrcVT.Value, f64.Value), 1,
- !if(!eq(SrcVT.Value, v2f16.Value), 1,
- !if(!eq(SrcVT.Value, v2i16.Value), 1,
- 0)))));
+ bit ret = !or(!eq(SrcVT.Value, f16.Value),
+ !eq(SrcVT.Value, f32.Value),
+ !eq(SrcVT.Value, f64.Value),
+ !eq(SrcVT.Value, v2f16.Value),
+ !eq(SrcVT.Value, v2i16.Value));
}
// Return type of input modifiers operand for specified input operand
@@ -2114,14 +2106,6 @@ class getHasDPP <int NumSrcArgs, ValueType DstVT = i32, ValueType Src0VT = i32,
getHasExt<NumSrcArgs, DstVT, Src0VT, Src1VT>.ret);
}
-class BitOr<bit a, bit b> {
- bit ret = !if(a, 1, !if(b, 1, 0));
-}
-
-class BitAnd<bit a, bit b> {
- bit ret = !if(a, !if(b, 1, 0), 0);
-}
-
def PatGenMode {
int NoPattern = 0;
int Pattern = 1;
@@ -2159,18 +2143,18 @@ class VOPProfile <list<ValueType> _ArgVT, bit _EnableF32SrcMods = 0,
field Operand Src1ModSDWA = getSrcModSDWA<Src1VT>.ret;
- field bit HasDst = !if(!eq(DstVT.Value, untyped.Value), 0, 1);
+ field bit HasDst = !ne(DstVT.Value, untyped.Value);
field bit HasDst32 = HasDst;
field bit EmitDst = HasDst; // force dst encoding, see v_movreld_b32 special case
field int NumSrcArgs = getNumSrcArgs<Src0VT, Src1VT, Src2VT>.ret;
- field bit HasSrc0 = !if(!eq(Src0VT.Value, untyped.Value), 0, 1);
- field bit HasSrc1 = !if(!eq(Src1VT.Value, untyped.Value), 0, 1);
- field bit HasSrc2 = !if(!eq(Src2VT.Value, untyped.Value), 0, 1);
+ field bit HasSrc0 = !ne(Src0VT.Value, untyped.Value);
+ field bit HasSrc1 = !ne(Src1VT.Value, untyped.Value);
+ field bit HasSrc2 = !ne(Src2VT.Value, untyped.Value);
// TODO: Modifiers logic is somewhat adhoc here, to be refined later
// HasModifiers affects the normal and DPP encodings. We take note of EnableF32SrcMods, which
// enables modifiers for i32 type.
- field bit HasModifiers = BitOr<isModifierType<Src0VT>.ret, EnableF32SrcMods>.ret;
+ field bit HasModifiers = !or(isModifierType<Src0VT>.ret, EnableF32SrcMods);
// HasSrc*FloatMods affects the SDWA encoding. We ignore EnableF32SrcMods.
field bit HasSrc0FloatMods = isFloatType<Src0VT>.ret;
@@ -2183,15 +2167,15 @@ class VOPProfile <list<ValueType> _ArgVT, bit _EnableF32SrcMods = 0,
field bit HasSrc2IntMods = isIntType<Src2VT>.ret;
field bit HasSrc0Mods = HasModifiers;
- field bit HasSrc1Mods = !if(HasModifiers, BitOr<HasSrc1FloatMods, HasSrc1IntMods>.ret, 0);
- field bit HasSrc2Mods = !if(HasModifiers, BitOr<HasSrc2FloatMods, HasSrc2IntMods>.ret, 0);
+ field bit HasSrc1Mods = !if(HasModifiers, !or(HasSrc1FloatMods, HasSrc1IntMods), 0);
+ field bit HasSrc2Mods = !if(HasModifiers, !or(HasSrc2FloatMods, HasSrc2IntMods), 0);
- field bit HasClamp = BitOr<isModifierType<Src0VT>.ret, EnableClamp>.ret;
+ field bit HasClamp = !or(isModifierType<Src0VT>.ret, EnableClamp);
field bit HasSDWAClamp = EmitDst;
- field bit HasFPClamp = BitAnd<isFloatType<DstVT>.ret, HasClamp>.ret;
+ field bit HasFPClamp = !and(isFloatType<DstVT>.ret, HasClamp);
field bit HasIntClamp = !if(isFloatType<DstVT>.ret, 0, HasClamp);
field bit HasClampLo = HasClamp;
- field bit HasClampHi = BitAnd<isPackedType<DstVT>.ret, HasClamp>.ret;
+ field bit HasClampHi = !and(isPackedType<DstVT>.ret, HasClamp);
field bit HasHigh = 0;
field bit IsPacked = isPackedType<Src0VT>.ret;
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
index ff1f5c4bc49b..4bbd39c1a8e5 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -149,7 +149,7 @@ multiclass SIRegLoHi16 <string n, bits<16> regIdx, bit ArtificialHigh = 1,
!cast<Register>(NAME#"_HI16")]> {
let Namespace = "AMDGPU";
let SubRegIndices = [lo16, hi16];
- let CoveredBySubRegs = !if(ArtificialHigh,0,1);
+ let CoveredBySubRegs = !not(ArtificialHigh);
let HWEncoding = regIdx;
let HWEncoding{8} = HWEncodingHigh;
}
diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 8fea9403cc42..2a0b7466c7f2 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -138,7 +138,7 @@ class VOP3Inst<string OpName, VOPProfile P, SDPatternOperator node = null_frag,
let AsmMatchConverter =
!if(P.HasOpSel,
"cvtVOP3OpSel",
- !if(!or(P.HasModifiers, !or(P.HasOMod, P.HasIntClamp)),
+ !if(!or(P.HasModifiers, P.HasOMod, P.HasIntClamp),
"cvtVOP3",
""));
}
@@ -174,7 +174,7 @@ class VOP3_Profile<VOPProfile P, VOP3Features Features = VOP3_REGULAR> : VOPProf
let IsMAI = !if(Features.IsMAI, 1, P.IsMAI);
let IsPacked = !if(Features.IsPacked, 1, P.IsPacked);
- let HasModifiers = !if(Features.IsPacked, !if(Features.IsMAI, 0, 1), P.HasModifiers);
+ let HasModifiers = !if(Features.IsPacked, !not(Features.IsMAI), P.HasModifiers);
// FIXME: Hack to stop printing _e64
let Outs64 = (outs DstRC.RegClass:$vdst);
@@ -277,7 +277,7 @@ class getInterp16Ins <bit HasSrc2, bit HasOMod,
class VOP3_INTERP16 <list<ValueType> ArgVT> : VOPProfile<ArgVT> {
- let HasOMod = !if(!eq(DstVT.Value, f16.Value), 0, 1);
+ let HasOMod = !ne(DstVT.Value, f16.Value);
let HasHigh = 1;
let Outs64 = (outs VGPR_32:$vdst);
diff --git a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
index 393fc8b09d44..159731707eab 100644
--- a/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3PInstructions.td
@@ -226,7 +226,7 @@ foreach Type = ["I", "U"] in
foreach Index = 0-3 in {
// Defines patterns that extract each Index'ed 8bit from an unsigned
// 32bit scalar value;
- def Type#Index#"_8bit" : Extract<!shl(Index, 3), 255, !if (!eq (Type, "U"), 1, 0)>;
+ def Type#Index#"_8bit" : Extract<!shl(Index, 3), 255, !eq (Type, "U")>;
// Defines multiplication patterns where the multiplication is happening on each
// Index'ed 8bit of a 32bit scalar value.
@@ -254,7 +254,7 @@ foreach Type = ["I", "U"] in
foreach Index = 0-7 in {
// Defines patterns that extract each Index'ed 4bit from an unsigned
// 32bit scalar value;
- def Type#Index#"_4bit" : Extract<!shl(Index, 2), 15, !if (!eq (Type, "U"), 1, 0)>;
+ def Type#Index#"_4bit" : Extract<!shl(Index, 2), 15, !eq (Type, "U")>;
// Defines multiplication patterns where the multiplication is happening on each
// Index'ed 8bit of a 32bit scalar value.
diff --git a/llvm/lib/Target/AMDGPU/VOPInstructions.td b/llvm/lib/Target/AMDGPU/VOPInstructions.td
index b27a1d31863d..8df188602218 100644
--- a/llvm/lib/Target/AMDGPU/VOPInstructions.td
+++ b/llvm/lib/Target/AMDGPU/VOPInstructions.td
@@ -69,7 +69,7 @@ class VOP3Common <dag outs, dag ins, string asm = "",
let VOP3 = 1;
let AsmVariantName = AMDGPUAsmVariants.VOP3;
- let AsmMatchConverter = !if(!eq(HasMods,1), "cvtVOP3", "");
+ let AsmMatchConverter = !if(HasMods, "cvtVOP3", "");
let isCodeGenOnly = 0;
@@ -129,7 +129,7 @@ class VOP3_Pseudo <string opName, VOPProfile P, list<dag> pattern = [],
let AsmMatchConverter =
!if(isVOP3P,
"cvtVOP3P",
- !if(!or(P.HasModifiers, !or(P.HasOMod, P.HasIntClamp)),
+ !if(!or(P.HasModifiers, P.HasOMod, P.HasIntClamp),
"cvtVOP3",
""));
}
@@ -626,7 +626,7 @@ class VOP_DPP_Pseudo <string OpName, VOPProfile P, list<dag> pattern=[]> :
string Mnemonic = OpName;
string AsmOperands = P.AsmDPP;
- let AsmMatchConverter = !if(!eq(P.HasModifiers,1), "cvtDPP", "");
+ let AsmMatchConverter = !if(P.HasModifiers, "cvtDPP", "");
let SubtargetPredicate = HasDPP;
let AssemblerPredicate = HasDPP;
let AsmVariantName = !if(P.HasExtDPP, AMDGPUAsmVariants.DPP,
@@ -681,7 +681,7 @@ class VOP_DPP <string OpName, VOPProfile P, bit IsDPP16,
let DPP = 1;
let Size = 8;
- let AsmMatchConverter = !if(!eq(P.HasModifiers,1), "cvtDPP", "");
+ let AsmMatchConverter = !if(P.HasModifiers, "cvtDPP", "");
let SubtargetPredicate = HasDPP;
let AssemblerPredicate = HasDPP;
let AsmVariantName = !if(P.HasExtDPP, AMDGPUAsmVariants.DPP,