[llvm-commits] [llvm] r57565 - in /llvm/trunk/lib/Target: TargetSelectionDAG.td X86/X86InstrInfo.td X86/X86InstrSSE.td

Dan Gohman gohman at apple.com
Tue Oct 14 23:50:19 PDT 2008


Author: djg
Date: Wed Oct 15 01:50:19 2008
New Revision: 57565

URL: http://llvm.org/viewvc/llvm-project?rev=57565&view=rev
Log:
Now that predicates can be composed, simplify several of
the predicates by extending simple predicates to create
more complex predicates instead of duplicating the logic
for the simple predicates.

This doesn't reduce much redundancy in DAGISelEmitter.cpp's
generated source yet; that will require improvements to
DAGISelEmitter.cpp's instruction sorting, so that it groups
nodes with similar predicates together more effectively.

Modified:
    llvm/trunk/lib/Target/TargetSelectionDAG.td
    llvm/trunk/lib/Target/X86/X86InstrInfo.td
    llvm/trunk/lib/Target/X86/X86InstrSSE.td

Modified: llvm/trunk/lib/Target/TargetSelectionDAG.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/TargetSelectionDAG.td?rev=57565&r1=57564&r2=57565&view=diff

==============================================================================
--- llvm/trunk/lib/Target/TargetSelectionDAG.td (original)
+++ llvm/trunk/lib/Target/TargetSelectionDAG.td Wed Oct 15 01:50:19 2008
@@ -573,227 +573,177 @@
 def ineg : PatFrag<(ops node:$in), (sub 0, node:$in)>;
 
 // load fragments.
-def load : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::NON_EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED;
+def unindexedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
+  return cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
+}]>;
+def load : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
 }]>;
 
 // extending load fragments.
-def extloadi1  : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i1;
-}]>;
-def extloadi8  : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i8;
-}]>;
-def extloadi16 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i16;
-}]>;
-def extloadi32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i32;
-}]>;
-def extloadf32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::f32;
-}]>;
-def extloadf64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::f64;
-}]>;
-
-def sextloadi1  : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::SEXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i1;
-}]>;
-def sextloadi8  : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::SEXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i8;
-}]>;
-def sextloadi16 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::SEXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i16;
-}]>;
-def sextloadi32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::SEXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i32;
-}]>;
-
-def zextloadi1  : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::ZEXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i1;
-}]>;
-def zextloadi8  : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::ZEXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i8;
-}]>;
-def zextloadi16 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::ZEXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i16;
-}]>;
-def zextloadi32 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::ZEXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getMemoryVT() == MVT::i32;
+def extload   : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
+}]>;
+def sextload  : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
+}]>;
+def zextload  : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
+}]>;
+
+def extloadi1  : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
+}]>;
+def extloadi8  : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def extloadi16 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def extloadi32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+def extloadf32 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f32;
+}]>;
+def extloadf64 : PatFrag<(ops node:$ptr), (extload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::f64;
+}]>;
+
+def sextloadi1  : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
+}]>;
+def sextloadi8  : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def sextloadi16 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def sextloadi32 : PatFrag<(ops node:$ptr), (sextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
+}]>;
+
+def zextloadi1  : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i1;
+}]>;
+def zextloadi8  : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
+}]>;
+def zextloadi16 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
+}]>;
+def zextloadi32 : PatFrag<(ops node:$ptr), (zextload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
 }]>;
 
 // store fragments.
+def unindexedstore : PatFrag<(ops node:$val, node:$ptr),
+                             (st node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
+}]>;
 def store : PatFrag<(ops node:$val, node:$ptr),
-                    (st node:$val, node:$ptr), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  return !ST->isTruncatingStore() &&
-         ST->getAddressingMode() == ISD::UNINDEXED;
+                    (unindexedstore node:$val, node:$ptr), [{
+  return !cast<StoreSDNode>(N)->isTruncatingStore();
 }]>;
 
 // truncstore fragments.
+def truncstore : PatFrag<(ops node:$val, node:$ptr),
+                         (unindexedstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->isTruncatingStore();
+}]>;
 def truncstorei8 : PatFrag<(ops node:$val, node:$ptr),
-                           (st node:$val, node:$ptr), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8 &&
-         ST->getAddressingMode() == ISD::UNINDEXED;
+                           (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
 }]>;
 def truncstorei16 : PatFrag<(ops node:$val, node:$ptr),
-                            (st node:$val, node:$ptr), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16 &&
-         ST->getAddressingMode() == ISD::UNINDEXED;
+                            (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
 }]>;
 def truncstorei32 : PatFrag<(ops node:$val, node:$ptr),
-                            (st node:$val, node:$ptr), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32 &&
-         ST->getAddressingMode() == ISD::UNINDEXED;
+                            (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
 }]>;
 def truncstoref32 : PatFrag<(ops node:$val, node:$ptr),
-                            (st node:$val, node:$ptr), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32 &&
-         ST->getAddressingMode() == ISD::UNINDEXED;
+                            (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
 }]>;
 def truncstoref64 : PatFrag<(ops node:$val, node:$ptr),
-                            (st node:$val, node:$ptr), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  return ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f64 &&
-         ST->getAddressingMode() == ISD::UNINDEXED;
+                            (truncstore node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f64;
 }]>;
 
 // indexed store fragments.
+def istore : PatFrag<(ops node:$val, node:$base, node:$offset),
+                     (ist node:$val, node:$base, node:$offset), [{
+  return !cast<StoreSDNode>(N)->isTruncatingStore();
+}]>;
+
 def pre_store : PatFrag<(ops node:$val, node:$base, node:$offset),
-                        (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-         !ST->isTruncatingStore();
+                        (istore node:$val, node:$base, node:$offset), [{
+  ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
+  return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
 }]>;
 
+def itruncstore : PatFrag<(ops node:$val, node:$base, node:$offset),
+                          (ist node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->isTruncatingStore();
+}]>;
+def pre_truncst : PatFrag<(ops node:$val, node:$base, node:$offset),
+                          (itruncstore node:$val, node:$base, node:$offset), [{
+  ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
+  return AM == ISD::PRE_INC || AM == ISD::PRE_DEC;
+}]>;
 def pre_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                            (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i1;
+                            (pre_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
 }]>;
 def pre_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                            (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8;
+                            (pre_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
 }]>;
 def pre_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                             (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16;
+                             (pre_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
 }]>;
 def pre_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                             (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32;
+                             (pre_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
 }]>;
 def pre_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                             (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::PRE_INC || AM == ISD::PRE_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32;
+                             (pre_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
 }]>;
 
 def post_store : PatFrag<(ops node:$val, node:$ptr, node:$offset),
-                         (ist node:$val, node:$ptr, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return !ST->isTruncatingStore() &&
-          (AM == ISD::POST_INC || AM == ISD::POST_DEC);
+                         (istore node:$val, node:$ptr, node:$offset), [{
+  ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
+  return AM == ISD::POST_INC || AM == ISD::POST_DEC;
 }]>;
 
+def post_truncst : PatFrag<(ops node:$val, node:$base, node:$offset),
+                           (itruncstore node:$val, node:$base, node:$offset), [{
+  ISD::MemIndexedMode AM = cast<StoreSDNode>(N)->getAddressingMode();
+  return AM == ISD::POST_INC || AM == ISD::POST_DEC;
+}]>;
 def post_truncsti1 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                             (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i1;
+                             (post_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i1;
 }]>;
 def post_truncsti8 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                             (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i8;
+                             (post_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
 }]>;
 def post_truncsti16 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                              (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i16;
+                              (post_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
 }]>;
 def post_truncsti32 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                              (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::i32;
+                              (post_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i32;
 }]>;
 def post_truncstf32 : PatFrag<(ops node:$val, node:$base, node:$offset),
-                              (ist node:$val, node:$base, node:$offset), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  ISD::MemIndexedMode AM = ST->getAddressingMode();
-  return (AM == ISD::POST_INC || AM == ISD::POST_DEC) &&
-         ST->isTruncatingStore() && ST->getMemoryVT() == MVT::f32;
+                              (post_truncst node:$val, node:$base, node:$offset), [{
+  return cast<StoreSDNode>(N)->getMemoryVT() == MVT::f32;
 }]>;
 
 // setcc convenience fragments.

Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.td?rev=57565&r1=57564&r2=57565&view=diff

==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.td Wed Oct 15 01:50:19 2008
@@ -254,10 +254,8 @@
 // Helper fragments for loads.
 // It's always safe to treat a anyext i16 load as a i32 load if the i16 is
 // known to be 32-bit aligned or better. Ditto for i8 to i16.
-def loadi16 : PatFrag<(ops node:$ptr), (i16 (ld node:$ptr)), [{
+def loadi16 : PatFrag<(ops node:$ptr), (i16 (unindexedload node:$ptr)), [{
   LoadSDNode *LD = cast<LoadSDNode>(N);
-  if (LD->getAddressingMode() != ISD::UNINDEXED)
-    return false;
   ISD::LoadExtType ExtType = LD->getExtensionType();
   if (ExtType == ISD::NON_EXTLOAD)
     return true;
@@ -266,20 +264,16 @@
   return false;
 }]>;
 
-def loadi16_anyext : PatFrag<(ops node:$ptr), (i32 (ld node:$ptr)), [{
+def loadi16_anyext : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
   LoadSDNode *LD = cast<LoadSDNode>(N);
-  if (LD->getAddressingMode() != ISD::UNINDEXED)
-    return false;
   ISD::LoadExtType ExtType = LD->getExtensionType();
   if (ExtType == ISD::EXTLOAD)
     return LD->getAlignment() >= 2 && !LD->isVolatile();
   return false;
 }]>;
 
-def loadi32 : PatFrag<(ops node:$ptr), (i32 (ld node:$ptr)), [{
+def loadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
   LoadSDNode *LD = cast<LoadSDNode>(N);
-  if (LD->getAddressingMode() != ISD::UNINDEXED)
-    return false;
   ISD::LoadExtType ExtType = LD->getExtensionType();
   if (ExtType == ISD::NON_EXTLOAD)
     return true;
@@ -288,12 +282,10 @@
   return false;
 }]>;
 
-def nvloadi32 : PatFrag<(ops node:$ptr), (i32 (ld node:$ptr)), [{
+def nvloadi32 : PatFrag<(ops node:$ptr), (i32 (unindexedload node:$ptr)), [{
   LoadSDNode *LD = cast<LoadSDNode>(N);
   if (LD->isVolatile())
     return false;
-  if (LD->getAddressingMode() != ISD::UNINDEXED)
-    return false;
   ISD::LoadExtType ExtType = LD->getExtensionType();
   if (ExtType == ISD::NON_EXTLOAD)
     return true;

Modified: llvm/trunk/lib/Target/X86/X86InstrSSE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrSSE.td?rev=57565&r1=57564&r2=57565&view=diff

==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrSSE.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrSSE.td Wed Oct 15 01:50:19 2008
@@ -98,19 +98,13 @@
 
 // Like 'store', but always requires vector alignment.
 def alignedstore : PatFrag<(ops node:$val, node:$ptr),
-                           (st node:$val, node:$ptr), [{
-  StoreSDNode *ST = cast<StoreSDNode>(N);
-  return !ST->isTruncatingStore() &&
-         ST->getAddressingMode() == ISD::UNINDEXED &&
-         ST->getAlignment() >= 16;
+                           (store node:$val, node:$ptr), [{
+  return cast<StoreSDNode>(N)->getAlignment() >= 16;
 }]>;
 
 // Like 'load', but always requires vector alignment.
-def alignedload : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::NON_EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getAlignment() >= 16;
+def alignedload : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+  return cast<LoadSDNode>(N)->getAlignment() >= 16;
 }]>;
 
 def alignedloadfsf32 : PatFrag<(ops node:$ptr), (f32   (alignedload node:$ptr))>;
@@ -125,11 +119,8 @@
 // be naturally aligned on some targets but not on others.
 // FIXME: Actually implement support for targets that don't require the
 //        alignment. This probably wants a subtarget predicate.
-def memop : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::NON_EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getAlignment() >= 16;
+def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{
+  return cast<LoadSDNode>(N)->getAlignment() >= 16;
 }]>;
 
 def memopfsf32 : PatFrag<(ops node:$ptr), (f32   (memop node:$ptr))>;
@@ -143,11 +134,8 @@
 // SSSE3 uses MMX registers for some instructions. They aren't aligned on a
 // 16-byte boundary.
 // FIXME: 8 byte alignment for mmx reads is not required
-def memop64 : PatFrag<(ops node:$ptr), (ld node:$ptr), [{
-  LoadSDNode *LD = cast<LoadSDNode>(N);
-  return LD->getExtensionType() == ISD::NON_EXTLOAD &&
-         LD->getAddressingMode() == ISD::UNINDEXED &&
-         LD->getAlignment() >= 8;
+def memop64 : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
+  return cast<LoadSDNode>(N)->getAlignment() >= 8;
 }]>;
 
 def memopv8i8  : PatFrag<(ops node:$ptr), (v8i8  (memop64 node:$ptr))>;





More information about the llvm-commits mailing list