[llvm-commits] [llvm] r55055 - in /llvm/trunk/lib/Target: Alpha/AlphaInstrInfo.td TargetSelectionDAG.td X86/X86InstrInfo.td X86/X86InstrSSE.td
Dan Gohman
gohman at apple.com
Wed Aug 20 08:24:24 PDT 2008
Author: djg
Date: Wed Aug 20 10:24:22 2008
New Revision: 55055
URL: http://llvm.org/viewvc/llvm-project?rev=55055&view=rev
Log:
TableGen-generated code already tests the opcode value, so it's not
necessary to use dyn_cast in these predicates.
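For readers skimming the diff: the distinction the patch leans on is the
standard Casting.h contract -- dyn_cast<> tests the node's class at run
time and returns null on a mismatch, while cast<> performs no runtime
branch and merely asserts (in debug builds) that the cast is valid. A
minimal sketch of the two, assuming only those standard semantics:

    // Checked: LD is null unless N is actually a LoadSDNode.
    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
      // ...use LD here; this body is skipped for non-load nodes.
    }

    // Unchecked: asserts in debug builds if N is not a LoadSDNode.
    LoadSDNode *LD = cast<LoadSDNode>(N);

Because the matcher code TableGen emits calls a fragment's predicate only
after the node's opcode has already matched, the dyn_cast check in these
predicates could never fail, and cast<> states that invariant directly.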
Modified:
llvm/trunk/lib/Target/Alpha/AlphaInstrInfo.td
llvm/trunk/lib/Target/TargetSelectionDAG.td
llvm/trunk/lib/Target/X86/X86InstrInfo.td
llvm/trunk/lib/Target/X86/X86InstrSSE.td
Modified: llvm/trunk/lib/Target/Alpha/AlphaInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Alpha/AlphaInstrInfo.td?rev=55055&r1=55054&r2=55055&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Alpha/AlphaInstrInfo.td (original)
+++ llvm/trunk/lib/Target/Alpha/AlphaInstrInfo.td Wed Aug 20 10:24:22 2008
@@ -89,11 +89,9 @@
}], SExt16>;
def zappat : PatFrag<(ops node:$LHS), (and node:$LHS, imm:$L), [{
- if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(N->getOperand(1))) {
- uint64_t build = get_zapImm(N->getOperand(0), (uint64_t)RHS->getValue());
- return build != 0;
- }
- return false;
+ ConstantSDNode *RHS = cast<ConstantSDNode>(N->getOperand(1));
+ uint64_t build = get_zapImm(N->getOperand(0), (uint64_t)RHS->getValue());
+ return build != 0;
}]>;
def immFPZ : PatLeaf<(fpimm), [{ //the only fpconstant nodes are +/- 0.0
Modified: llvm/trunk/lib/Target/TargetSelectionDAG.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/TargetSelectionDAG.td?rev=55055&r1=55054&r2=55055&view=diff
==============================================================================
--- llvm/trunk/lib/Target/TargetSelectionDAG.td (original)
+++ llvm/trunk/lib/Target/TargetSelectionDAG.td Wed Aug 20 10:24:22 2008
@@ -497,347 +497,291 @@
// load fragments.
def load : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
- return LD->getExtensionType() == ISD::NON_EXTLOAD &&
- LD->getAddressingMode() == ISD::UNINDEXED;
- return false;
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ return LD->getExtensionType() == ISD::NON_EXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED;
}]>;
// extending load fragments.
def extloadi1 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
- return LD->getExtensionType() == ISD::EXTLOAD &&
- LD->getAddressingMode() == ISD::UNINDEXED &&
- LD->getMemoryVT() == MVT::i1;
- return false;
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ return LD->getExtensionType() == ISD::EXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED &&
+ LD->getMemoryVT() == MVT::i1;
}]>;
def extloadi8 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
- return LD->getExtensionType() == ISD::EXTLOAD &&
- LD->getAddressingMode() == ISD::UNINDEXED &&
- LD->getMemoryVT() == MVT::i8;
- return false;
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ return LD->getExtensionType() == ISD::EXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED &&
+ LD->getMemoryVT() == MVT::i8;
}]>;
def extloadi16 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
- return LD->getExtensionType() == ISD::EXTLOAD &&
- LD->getAddressingMode() == ISD::UNINDEXED &&
- LD->getMemoryVT() == MVT::i16;
- return false;
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ return LD->getExtensionType() == ISD::EXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED &&
+ LD->getMemoryVT() == MVT::i16;
}]>;
def extloadi32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
- return LD->getExtensionType() == ISD::EXTLOAD &&
- LD->getAddressingMode() == ISD::UNINDEXED &&
- LD->getMemoryVT() == MVT::i32;
- return false;
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ return LD->getExtensionType() == ISD::EXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED &&
+ LD->getMemoryVT() == MVT::i32;
}]>;
def extloadf32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
- return LD->getExtensionType() == ISD::EXTLOAD &&
- LD->getAddressingMode() == ISD::UNINDEXED &&
- LD->getMemoryVT() == MVT::f32;
- return false;
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ return LD->getExtensionType() == ISD::EXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED &&
+ LD->getMemoryVT() == MVT::f32;
}]>;
def extloadf64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
- return LD->getExtensionType() == ISD::EXTLOAD &&
- LD->getAddressingMode() == ISD::UNINDEXED &&
- LD->getMemoryVT() == MVT::f64;
- return false;
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ return LD->getExtensionType() == ISD::EXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED &&
+ LD->getMemoryVT() == MVT::f64;
}]>;
def sextloadi1 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
- return LD->getExtensionType() == ISD::SEXTLOAD &&
- LD->getAddressingMode() == ISD::UNINDEXED &&
- LD->getMemoryVT() == MVT::i1;
- return false;
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ return LD->getExtensionType() == ISD::SEXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED &&
+ LD->getMemoryVT() == MVT::i1;
}]>;
def sextloadi8 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
- return LD->getExtensionType() == ISD::SEXTLOAD &&
- LD->getAddressingMode() == ISD::UNINDEXED &&
- LD->getMemoryVT() == MVT::i8;
- return false;
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ return LD->getExtensionType() == ISD::SEXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED &&
+ LD->getMemoryVT() == MVT::i8;
}]>;
def sextloadi16 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
- return LD->getExtensionType() == ISD::SEXTLOAD &&
- LD->getAddressingMode() == ISD::UNINDEXED &&
- LD->getMemoryVT() == MVT::i16;
- return false;
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ return LD->getExtensionType() == ISD::SEXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED &&
+ LD->getMemoryVT() == MVT::i16;
}]>;
def sextloadi32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
- return LD->getExtensionType() == ISD::SEXTLOAD &&
- LD->getAddressingMode() == ISD::UNINDEXED &&
- LD->getMemoryVT() == MVT::i32;
- return false;
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ return LD->getExtensionType() == ISD::SEXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED &&
+ LD->getMemoryVT() == MVT::i32;
}]>;
def zextloadi1 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
- return LD->getExtensionType() == ISD::ZEXTLOAD &&
- LD->getAddressingMode() == ISD::UNINDEXED &&
- LD->getMemoryVT() == MVT::i1;
- return false;
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ return LD->getExtensionType() == ISD::ZEXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED &&
+ LD->getMemoryVT() == MVT::i1;
}]>;
def zextloadi8 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
- return LD->getExtensionType() == ISD::ZEXTLOAD &&
- LD->getAddressingMode() == ISD::UNINDEXED &&
- LD->getMemoryVT() == MVT::i8;
- return false;
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ return LD->getExtensionType() == ISD::ZEXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED &&
+ LD->getMemoryVT() == MVT::i8;
}]>;
def zextloadi16 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
- return LD->getExtensionType() == ISD::ZEXTLOAD &&
- LD->getAddressingMode() == ISD::UNINDEXED &&
- LD->getMemoryVT() == MVT::i16;
- return false;
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ return LD->getExtensionType() == ISD::ZEXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED &&
+ LD->getMemoryVT() == MVT::i16;
}]>;
def zextloadi32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
- return LD->getExtensionType() == ISD::ZEXTLOAD &&
- LD->getAddressingMode() == ISD::UNINDEXED &&
- LD->getMemoryVT() == MVT::i32;
- return false;
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ return LD->getExtensionType() == ISD::ZEXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED &&
+ LD->getMemoryVT() == MVT::i32;
}]>;
// store fragments.
def store : PatFrag<(ops node:$val, node:$ptr),
(st node:$val, node:$ptr), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
- return !ST->isTruncatingStore() &&
- ST->getAddressingMode() == ISD::UNINDEXED;
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ return !ST->isTruncatingStore() &&
+ ST->getAddressingMode() == ISD::UNINDEXED;
}]>;
// truncstore fragments.
def truncstorei8 : PatFrag<(ops node:$val, node:$ptr),
(st node:$val, node:$ptr), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
- return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8 &&
- ST->getAddressingMode() == ISD::UNINDEXED;
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8 &&
+ ST->getAddressingMode() == ISD::UNINDEXED;
}]>;
def truncstorei16 : PatFrag<(ops node:$val, node:$ptr),
(st node:$val, node:$ptr), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
- return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16 &&
- ST->getAddressingMode() == ISD::UNINDEXED;
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16 &&
+ ST->getAddressingMode() == ISD::UNINDEXED;
}]>;
def truncstorei32 : PatFrag<(ops node:$val, node:$ptr),
(st node:$val, node:$ptr), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
- return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32 &&
- ST->getAddressingMode() == ISD::UNINDEXED;
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32 &&
+ ST->getAddressingMode() == ISD::UNINDEXED;
}]>;
def truncstoref32 : PatFrag<(ops node:$val, node:$ptr),
(st node:$val, node:$ptr), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
- return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32 &&
- ST->getAddressingMode() == ISD::UNINDEXED;
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32 &&
+ ST->getAddressingMode() == ISD::UNINDEXED;
}]>;
def truncstoref64 : PatFrag<(ops node:$val, node:$ptr),
(st node:$val, node:$ptr), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
- return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f64 &&
- ST->getAddressingMode() == ISD::UNINDEXED;
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f64 &&
+ ST->getAddressingMode() == ISD::UNINDEXED;
}]>;
// indexed store fragments.
def pre_store : PatFrag<(ops node:$val, node:$base, node:$offset),
(ist node:$val, node:$base, node:$offset), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- ISD::MemIndexedMode AM = ST->getAddressingMode();
- return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
- !ST->isTruncatingStore();
- }
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ ISD::MemIndexedMode AM = ST->getAddressingMode();
+ return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
+ !ST->isTruncatingStore();
}]>;
def pre_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
(ist node:$val, node:$base, node:$offset), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- ISD::MemIndexedMode AM = ST->getAddressingMode();
- return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
- ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i1;
- }
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ ISD::MemIndexedMode AM = ST->getAddressingMode();
+ return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
+ ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i1;
}]>;
def pre_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
(ist node:$val, node:$base, node:$offset), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- ISD::MemIndexedMode AM = ST->getAddressingMode();
- return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
- ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8;
- }
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ ISD::MemIndexedMode AM = ST->getAddressingMode();
+ return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
+ ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8;
}]>;
def pre_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
(ist node:$val, node:$base, node:$offset), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- ISD::MemIndexedMode AM = ST->getAddressingMode();
- return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
- ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16;
- }
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ ISD::MemIndexedMode AM = ST->getAddressingMode();
+ return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
+ ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16;
}]>;
def pre_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
(ist node:$val, node:$base, node:$offset), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- ISD::MemIndexedMode AM = ST->getAddressingMode();
- return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
- ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32;
- }
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ ISD::MemIndexedMode AM = ST->getAddressingMode();
+ return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
+ ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32;
}]>;
def pre_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
(ist node:$val, node:$base, node:$offset), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- ISD::MemIndexedMode AM = ST->getAddressingMode();
- return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
- ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32;
- }
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ ISD::MemIndexedMode AM = ST->getAddressingMode();
+ return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
+ ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32;
}]>;
def post_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
(ist node:$val, node:$ptr, node:$offset), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- ISD::MemIndexedMode AM = ST->getAddressingMode();
- return !ST->isTruncatingStore() &&
- (AM == ISD::POST_INC || AM == ISD::POST_DEC);
- }
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ ISD::MemIndexedMode AM = ST->getAddressingMode();
+ return !ST->isTruncatingStore() &&
+ (AM == ISD::POST_INC || AM == ISD::POST_DEC);
}]>;
def post_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
(ist node:$val, node:$base, node:$offset), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- ISD::MemIndexedMode AM = ST->getAddressingMode();
- return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
- ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i1;
- }
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ ISD::MemIndexedMode AM = ST->getAddressingMode();
+ return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
+ ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i1;
}]>;
def post_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
(ist node:$val, node:$base, node:$offset), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- ISD::MemIndexedMode AM = ST->getAddressingMode();
- return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
- ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8;
- }
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ ISD::MemIndexedMode AM = ST->getAddressingMode();
+ return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
+ ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8;
}]>;
def post_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
(ist node:$val, node:$base, node:$offset), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- ISD::MemIndexedMode AM = ST->getAddressingMode();
- return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
- ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16;
- }
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ ISD::MemIndexedMode AM = ST->getAddressingMode();
+ return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
+ ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16;
}]>;
def post_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
(ist node:$val, node:$base, node:$offset), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- ISD::MemIndexedMode AM = ST->getAddressingMode();
- return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
- ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32;
- }
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ ISD::MemIndexedMode AM = ST->getAddressingMode();
+ return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
+ ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32;
}]>;
def post_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
(ist node:$val, node:$base, node:$offset), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
- ISD::MemIndexedMode AM = ST->getAddressingMode();
- return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
- ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32;
- }
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ ISD::MemIndexedMode AM = ST->getAddressingMode();
+ return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
+ ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32;
}]>;
-//Atomic patterns
+// Atomic patterns
def atomic_cmp_swap_8 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
(atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
- if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getValueType(0) == MVT::i8;
- return false;
+ AtomicSDNode* V = cast<AtomicSDNode>(N);
+ return V->getValueType(0) == MVT::i8;
}]>;
def atomic_cmp_swap_16 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
(atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
- if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getValueType(0) == MVT::i16;
- return false;
+ AtomicSDNode* V = cast<AtomicSDNode>(N);
+ return V->getValueType(0) == MVT::i16;
}]>;
def atomic_cmp_swap_32 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
(atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
- if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getValueType(0) == MVT::i32;
- return false;
+ AtomicSDNode* V = cast<AtomicSDNode>(N);
+ return V->getValueType(0) == MVT::i32;
}]>;
def atomic_cmp_swap_64 : PatFrag<(ops node:$ptr, node:$cmp, node:$swp),
(atomic_cmp_swap node:$ptr, node:$cmp, node:$swp), [{
- if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getValueType(0) == MVT::i64;
- return false;
+ AtomicSDNode* V = cast<AtomicSDNode>(N);
+ return V->getValueType(0) == MVT::i64;
}]>;
def atomic_load_add_8 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_add node:$ptr, node:$inc), [{
- if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getValueType(0) == MVT::i8;
- return false;
+ AtomicSDNode* V = cast<AtomicSDNode>(N);
+ return V->getValueType(0) == MVT::i8;
}]>;
def atomic_load_add_16 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_add node:$ptr, node:$inc), [{
- if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getValueType(0) == MVT::i16;
- return false;
+ AtomicSDNode* V = cast<AtomicSDNode>(N);
+ return V->getValueType(0) == MVT::i16;
}]>;
def atomic_load_add_32 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_add node:$ptr, node:$inc), [{
- if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getValueType(0) == MVT::i32;
- return false;
+ AtomicSDNode* V = cast<AtomicSDNode>(N);
+ return V->getValueType(0) == MVT::i32;
}]>;
def atomic_load_add_64 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_load_add node:$ptr, node:$inc), [{
- if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getValueType(0) == MVT::i64;
- return false;
+ AtomicSDNode* V = cast<AtomicSDNode>(N);
+ return V->getValueType(0) == MVT::i64;
}]>;
def atomic_swap_8 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_swap node:$ptr, node:$inc), [{
- if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getValueType(0) == MVT::i8;
- return false;
+ AtomicSDNode* V = cast<AtomicSDNode>(N);
+ return V->getValueType(0) == MVT::i8;
}]>;
def atomic_swap_16 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_swap node:$ptr, node:$inc), [{
- if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getValueType(0) == MVT::i16;
- return false;
+ AtomicSDNode* V = cast<AtomicSDNode>(N);
+ return V->getValueType(0) == MVT::i16;
}]>;
def atomic_swap_32 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_swap node:$ptr, node:$inc), [{
- if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getValueType(0) == MVT::i32;
- return false;
+ AtomicSDNode* V = cast<AtomicSDNode>(N);
+ return V->getValueType(0) == MVT::i32;
}]>;
def atomic_swap_64 : PatFrag<(ops node:$ptr, node:$inc),
(atomic_swap node:$ptr, node:$inc), [{
- if (AtomicSDNode* V = dyn_cast<AtomicSDNode>(N))
- return V->getValueType(0) == MVT::i64;
- return false;
+ AtomicSDNode* V = cast<AtomicSDNode>(N);
+ return V->getValueType(0) == MVT::i64;
}]>;
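To make "already tests the opcode" concrete: the selection routine that
TableGen emits for a pattern built on one of these fragments has roughly
the following shape (a hand-written approximation -- the real generated
matcher differs in detail, and the Predicate_load naming here is
illustrative):

    // Sketch of emitted matcher logic: opcode check first, predicate second.
    static inline bool Predicate_load(SDNode *N) {
      LoadSDNode *LD = cast<LoadSDNode>(N);  // safe: opcode already matched
      return LD->getExtensionType() == ISD::NON_EXTLOAD &&
             LD->getAddressingMode() == ISD::UNINDEXED;
    }

    // ...inside the generated Select routine:
    if (N->getOpcode() == ISD::LOAD && Predicate_load(N)) {
      // emit the target's load instruction
    }

So by the time any predicate above runs, N is guaranteed to be an instance
of the node class its pattern operator implies.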
Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.td?rev=55055&r1=55054&r2=55055&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.td Wed Aug 20 10:24:22 2008
@@ -232,28 +232,26 @@
// It's always safe to treat an anyext i16 load as an i32 load if the i16 is
// known to be 32-bit aligned or better. Ditto for i8 to i16.
def loadi16 : PatFrag<(ops node:$ptr), (i16 (ld node:$ptr)), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
- if (LD->getAddressingMode() != ISD::UNINDEXED)
- return false;
- ISD::LoadExtType ExtType = LD->getExtensionType();
- if (ExtType == ISD::NON_EXTLOAD)
- return true;
- if (ExtType == ISD::EXTLOAD)
- return LD->getAlignment() >= 2 && !LD->isVolatile();
- }
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ if (LD->getAddressingMode() != ISD::UNINDEXED)
+ return false;
+ ISD::LoadExtType ExtType = LD->getExtensionType();
+ if (ExtType == ISD::NON_EXTLOAD)
+ return true;
+ if (ExtType == ISD::EXTLOAD)
+ return LD->getAlignment() >= 2 && !LD->isVolatile();
return false;
}]>;
def loadi32 : PatFrag<(ops node:$ptr), (i32 (ld node:$ptr)), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
- if (LD->getAddressingMode() != ISD::UNINDEXED)
- return false;
- ISD::LoadExtType ExtType = LD->getExtensionType();
- if (ExtType == ISD::NON_EXTLOAD)
- return true;
- if (ExtType == ISD::EXTLOAD)
- return LD->getAlignment() >= 4 && !LD->isVolatile();
- }
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ if (LD->getAddressingMode() != ISD::UNINDEXED)
+ return false;
+ ISD::LoadExtType ExtType = LD->getExtensionType();
+ if (ExtType == ISD::NON_EXTLOAD)
+ return true;
+ if (ExtType == ISD::EXTLOAD)
+ return LD->getAlignment() >= 4 && !LD->isVolatile();
return false;
}]>;
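Note that loadi16 and loadi32, unlike the fragments above, keep their
trailing return false even after the rewrite: with the cast hoisted out of
the if, the predicate must still reject sign- and zero-extending loads,
which fall through both ExtType tests. An equivalent single-expression
restatement of loadi16's logic (for illustration only, not part of the
patch):

    LoadSDNode *LD = cast<LoadSDNode>(N);
    ISD::LoadExtType ExtType = LD->getExtensionType();
    return LD->getAddressingMode() == ISD::UNINDEXED &&
           (ExtType == ISD::NON_EXTLOAD ||
            (ExtType == ISD::EXTLOAD &&
             LD->getAlignment() >= 2 && !LD->isVolatile()));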
Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=55055&r1=55054&r2=55055&view=diff
==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Wed Aug 20 10:24:22 2008
@@ -99,20 +99,18 @@
// Like 'store', but always requires vector alignment.
def alignedstore : PatFrag<(ops node:$val, node:$ptr),
(st node:$val, node:$ptr), [{
- if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N))
- return !ST->isTruncatingStore() &&
- ST->getAddressingMode() == ISD::UNINDEXED &&
- ST->getAlignment() >= 16;
- return false;
+ StoreSDNode *ST = cast<StoreSDNode>(N);
+ return !ST->isTruncatingStore() &&
+ ST->getAddressingMode() == ISD::UNINDEXED &&
+ ST->getAlignment() >= 16;
}]>;
// Like 'load', but always requires vector alignment.
def alignedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
- return LD->getExtensionType() == ISD::NON_EXTLOAD &&
- LD->getAddressingMode() == ISD::UNINDEXED &&
- LD->getAlignment() >= 16;
- return false;
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ return LD->getExtensionType() == ISD::NON_EXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED &&
+ LD->getAlignment() >= 16;
}]>;
def alignedloadfsf32 : PatFrag<(ops node:$ptr), (f32 (alignedload node:$ptr))>;
@@ -128,11 +126,10 @@
// FIXME: Actually implement support for targets that don't require the
// alignment. This probably wants a subtarget predicate.
def memop : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
- return LD->getExtensionType() == ISD::NON_EXTLOAD &&
- LD->getAddressingMode() == ISD::UNINDEXED &&
- LD->getAlignment() >= 16;
- return false;
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ return LD->getExtensionType() == ISD::NON_EXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED &&
+ LD->getAlignment() >= 16;
}]>;
def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>;
@@ -147,11 +144,10 @@
// 16-byte boundary.
// FIXME: 8 byte alignment for mmx reads is not required
def memop64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
- if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
- return LD->getExtensionType() == ISD::NON_EXTLOAD &&
- LD->getAddressingMode() == ISD::UNINDEXED &&
- LD->getAlignment() >= 8;
- return false;
+ LoadSDNode *LD = cast<LoadSDNode>(N);
+ return LD->getExtensionType() == ISD::NON_EXTLOAD &&
+ LD->getAddressingMode() == ISD::UNINDEXED &&
+ LD->getAlignment() >= 8;
}]>;
def memopv8i8 : PatFrag<(ops node:$ptr), (v8i8 (memop64 node:$ptr))>;