[llvm-commits] [llvm] r46280 - in /llvm/trunk/lib: CodeGen/SelectionDAG/LegalizeDAG.cpp Target/ARM/ARMISelLowering.cpp Target/IA64/IA64ISelLowering.cpp Target/PowerPC/PPCISelLowering.cpp Target/Sparc/SparcISelDAGToDAG.cpp Target/X86/X86ISelLowering.cpp Target/X86/X86Instr64bit.td Target/X86/X86InstrInfo.td

Duncan Sands <baldrick@free.fr>
Wed Jan 23 12:39:46 PST 2008


Author: baldrick
Date: Wed Jan 23 14:39:46 2008
New Revision: 46280

URL: http://llvm.org/viewvc/llvm-project?rev=46280&view=rev
Log:
The last pieces needed for loading arbitrary
precision integers.  This won't actually work
(and most of the code is dead) unless the new
legalization machinery is turned on.  While
there, I rationalized the handling of i1, and
removed some bogus (and unused) sextload patterns.
For i1, this could result in microscopically
better code for some architectures (not X86).
It might also result in worse code if annotating
with AssertZExt nodes turns out to be more harmful
than helpful.
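
As a rough illustration of what the two-load expansion computes, here is a
minimal C++ sketch (not LLVM code; the function name is invented, and a
little-endian host is assumed) of the ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
split that the LegalizeDAG.cpp hunk below performs for a 24-bit load:

  #include <cstdint>
  #include <cstring>

  // Zero-extend the 24-bit little-endian value at p to 32 bits using one
  // 16-bit load and one 8-bit load, mirroring the legalizer's decomposition.
  uint32_t load_i24_zext(const uint8_t *p) {
    uint16_t lo;                                 // bottom RoundWidth (16) bits
    std::memcpy(&lo, p, sizeof lo);              // ZEXTLOAD:i16
    uint8_t hi = p[2];                           // load at +2: ExtraWidth bits
    return (uint32_t)lo | ((uint32_t)hi << 16);  // shl by RoundWidth, then OR
  }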

Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
    llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
    llvm/trunk/lib/Target/IA64/IA64ISelLowering.cpp
    llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp
    llvm/trunk/lib/Target/Sparc/SparcISelDAGToDAG.cpp
    llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
    llvm/trunk/lib/Target/X86/X86Instr64bit.td
    llvm/trunk/lib/Target/X86/X86InstrInfo.td

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp?rev=46280&r1=46279&r2=46280&view=diff

==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp Wed Jan 23 14:39:46 2008
@@ -1820,76 +1820,188 @@
       return Op.ResNo ? Tmp4 : Tmp3;
     } else {
       MVT::ValueType SrcVT = LD->getLoadedVT();
-      switch (TLI.getLoadXAction(ExtType, SrcVT)) {
-      default: assert(0 && "This action is not supported yet!");
-      case TargetLowering::Promote:
-        assert(SrcVT == MVT::i1 &&
-               "Can only promote extending LOAD from i1 -> i8!");
-        Result = DAG.getExtLoad(ExtType, Node->getValueType(0), Tmp1, Tmp2,
-                                LD->getSrcValue(), LD->getSrcValueOffset(),
-                                MVT::i8, LD->isVolatile(), LD->getAlignment());
-        Tmp1 = Result.getValue(0);
-        Tmp2 = Result.getValue(1);
-      break;
-      case TargetLowering::Custom:
-        isCustom = true;
-        // FALLTHROUGH
-      case TargetLowering::Legal:
-        Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset());
-        Tmp1 = Result.getValue(0);
-        Tmp2 = Result.getValue(1);
-      
-        if (isCustom) {
-          Tmp3 = TLI.LowerOperation(Result, DAG);
-          if (Tmp3.Val) {
-            Tmp1 = LegalizeOp(Tmp3);
-            Tmp2 = LegalizeOp(Tmp3.getValue(1));
-          }
+      unsigned SrcWidth = MVT::getSizeInBits(SrcVT);
+      int SVOffset = LD->getSrcValueOffset();
+      unsigned Alignment = LD->getAlignment();
+      bool isVolatile = LD->isVolatile();
+
+      if (SrcWidth != MVT::getStoreSizeInBits(SrcVT) &&
+          // Some targets pretend to have an i1 loading operation, and actually
+          // load an i8.  This trick is correct for ZEXTLOAD because the top 7
+          // bits are guaranteed to be zero; it helps the optimizers understand
+          // that these bits are zero.  It is also useful for EXTLOAD, since it
+          // tells the optimizers that those bits are undefined.  It would be
+          // nice to have an effective generic way of getting these benefits...
+          // Until such a way is found, don't insist on promoting i1 here.
+          (SrcVT != MVT::i1 ||
+           TLI.getLoadXAction(ExtType, MVT::i1) == TargetLowering::Promote)) {
+        // Promote to a byte-sized load if not loading an integral number of
+        // bytes.  For example, promote EXTLOAD:i20 -> EXTLOAD:i24.
+        unsigned NewWidth = MVT::getStoreSizeInBits(SrcVT);
+        MVT::ValueType NVT = MVT::getIntegerType(NewWidth);
+        SDOperand Ch;
+
+        // The extra bits are guaranteed to be zero, since we stored them that
+        // way.  A zext load from NVT thus automatically gives zext from SrcVT.
+
+        ISD::LoadExtType NewExtType =
+          ExtType == ISD::ZEXTLOAD ? ISD::ZEXTLOAD : ISD::EXTLOAD;
+
+        Result = DAG.getExtLoad(NewExtType, Node->getValueType(0),
+                                Tmp1, Tmp2, LD->getSrcValue(), SVOffset,
+                                NVT, isVolatile, Alignment);
+
+        Ch = Result.getValue(1); // The chain.
+
+        if (ExtType == ISD::SEXTLOAD)
+          // Having the top bits zero doesn't help when sign extending.
+          Result = DAG.getNode(ISD::SIGN_EXTEND_INREG, Result.getValueType(),
+                               Result, DAG.getValueType(SrcVT));
+        else if (ExtType == ISD::ZEXTLOAD || NVT == Result.getValueType())
+          // All the top bits are guaranteed to be zero - inform the optimizers.
+          Result = DAG.getNode(ISD::AssertZext, Result.getValueType(), Result,
+                               DAG.getValueType(SrcVT));
+
+        Tmp1 = LegalizeOp(Result);
+        Tmp2 = LegalizeOp(Ch);
+      } else if (SrcWidth & (SrcWidth - 1)) {
+        // If not loading a power-of-2 number of bits, expand as two loads.
+        assert(MVT::isExtendedVT(SrcVT) && !MVT::isVector(SrcVT) &&
+               "Unsupported extload!");
+        unsigned RoundWidth = 1 << Log2_32(SrcWidth);
+        assert(RoundWidth < SrcWidth);
+        unsigned ExtraWidth = SrcWidth - RoundWidth;
+        assert(ExtraWidth < RoundWidth);
+        assert(!(RoundWidth % 8) && !(ExtraWidth % 8) &&
+               "Load size not an integral number of bytes!");
+        MVT::ValueType RoundVT = MVT::getIntegerType(RoundWidth);
+        MVT::ValueType ExtraVT = MVT::getIntegerType(ExtraWidth);
+        SDOperand Lo, Hi, Ch;
+        unsigned IncrementSize;
+
+        if (TLI.isLittleEndian()) {
+          // EXTLOAD:i24 -> ZEXTLOAD:i16 | (shl EXTLOAD@+2:i8, 16)
+          // Load the bottom RoundWidth bits.
+          Lo = DAG.getExtLoad(ISD::ZEXTLOAD, Node->getValueType(0), Tmp1, Tmp2,
+                              LD->getSrcValue(), SVOffset, RoundVT, isVolatile,
+                              Alignment);
+
+          // Load the remaining ExtraWidth bits.
+          IncrementSize = RoundWidth / 8;
+          Tmp2 = DAG.getNode(ISD::ADD, Tmp2.getValueType(), Tmp2,
+                             DAG.getIntPtrConstant(IncrementSize));
+          Hi = DAG.getExtLoad(ExtType, Node->getValueType(0), Tmp1, Tmp2,
+                              LD->getSrcValue(), SVOffset + IncrementSize,
+                              ExtraVT, isVolatile,
+                              MinAlign(Alignment, IncrementSize));
+
+          // Build a factor node to remember that this load is independent of the
+          // other one.
+          Ch = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1),
+                           Hi.getValue(1));
+
+          // Move the top bits to the right place.
+          Hi = DAG.getNode(ISD::SHL, Hi.getValueType(), Hi,
+                           DAG.getConstant(RoundWidth, TLI.getShiftAmountTy()));
+
+          // Join the hi and lo parts.
+          Result = DAG.getNode(ISD::OR, Node->getValueType(0), Lo, Hi);
         } else {
-          // If this is an unaligned load and the target doesn't support it,
-          // expand it.
-          if (!TLI.allowsUnalignedMemoryAccesses()) {
-            unsigned ABIAlignment = TLI.getTargetData()->
-              getABITypeAlignment(MVT::getTypeForValueType(LD->getLoadedVT()));
-            if (LD->getAlignment() < ABIAlignment){
-              Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.Val), DAG,
-                                           TLI);
-              Tmp1 = Result.getOperand(0);
-              Tmp2 = Result.getOperand(1);
-              Tmp1 = LegalizeOp(Tmp1);
-              Tmp2 = LegalizeOp(Tmp2);
+          // Big endian - avoid unaligned loads.
+          // EXTLOAD:i24 -> (shl EXTLOAD:i16, 8) | ZEXTLOAD@+2:i8
+          // Load the top RoundWidth bits.
+          Hi = DAG.getExtLoad(ExtType, Node->getValueType(0), Tmp1, Tmp2,
+                              LD->getSrcValue(), SVOffset, RoundVT, isVolatile,
+                              Alignment);
+
+          // Load the remaining ExtraWidth bits.
+          IncrementSize = RoundWidth / 8;
+          Tmp2 = DAG.getNode(ISD::ADD, Tmp2.getValueType(), Tmp2,
+                             DAG.getIntPtrConstant(IncrementSize));
+          Lo = DAG.getExtLoad(ISD::ZEXTLOAD, Node->getValueType(0), Tmp1, Tmp2,
+                              LD->getSrcValue(), SVOffset + IncrementSize,
+                              ExtraVT, isVolatile,
+                              MinAlign(Alignment, IncrementSize));
+
+          // Build a factor node to remember that this load is independent of the
+          // other one.
+          Ch = DAG.getNode(ISD::TokenFactor, MVT::Other, Lo.getValue(1),
+                           Hi.getValue(1));
+
+          // Move the top bits to the right place.
+          Hi = DAG.getNode(ISD::SHL, Hi.getValueType(), Hi,
+                           DAG.getConstant(ExtraWidth, TLI.getShiftAmountTy()));
+
+          // Join the hi and lo parts.
+          Result = DAG.getNode(ISD::OR, Node->getValueType(0), Lo, Hi);
+        }
+
+        Tmp1 = LegalizeOp(Result);
+        Tmp2 = LegalizeOp(Ch);
+      } else {
+        switch (TLI.getLoadXAction(ExtType, SrcVT)) {
+        default: assert(0 && "This action is not supported yet!");
+        case TargetLowering::Custom:
+          isCustom = true;
+          // FALLTHROUGH
+        case TargetLowering::Legal:
+          Result = DAG.UpdateNodeOperands(Result, Tmp1, Tmp2, LD->getOffset());
+          Tmp1 = Result.getValue(0);
+          Tmp2 = Result.getValue(1);
+
+          if (isCustom) {
+            Tmp3 = TLI.LowerOperation(Result, DAG);
+            if (Tmp3.Val) {
+              Tmp1 = LegalizeOp(Tmp3);
+              Tmp2 = LegalizeOp(Tmp3.getValue(1));
+            }
+          } else {
+            // If this is an unaligned load and the target doesn't support it,
+            // expand it.
+            if (!TLI.allowsUnalignedMemoryAccesses()) {
+              unsigned ABIAlignment = TLI.getTargetData()->
+                getABITypeAlignment(MVT::getTypeForValueType(LD->getLoadedVT()));
+              if (LD->getAlignment() < ABIAlignment){
+                Result = ExpandUnalignedLoad(cast<LoadSDNode>(Result.Val), DAG,
+                                             TLI);
+                Tmp1 = Result.getOperand(0);
+                Tmp2 = Result.getOperand(1);
+                Tmp1 = LegalizeOp(Tmp1);
+                Tmp2 = LegalizeOp(Tmp2);
+              }
             }
           }
-        }
-        break;
-      case TargetLowering::Expand:
-        // f64 = EXTLOAD f32 should expand to LOAD, FP_EXTEND
-        if (SrcVT == MVT::f32 && Node->getValueType(0) == MVT::f64) {
-          SDOperand Load = DAG.getLoad(SrcVT, Tmp1, Tmp2, LD->getSrcValue(),
-                                       LD->getSrcValueOffset(),
-                                       LD->isVolatile(), LD->getAlignment());
-          Result = DAG.getNode(ISD::FP_EXTEND, Node->getValueType(0), Load);
-          Tmp1 = LegalizeOp(Result);  // Relegalize new nodes.
-          Tmp2 = LegalizeOp(Load.getValue(1));
+          break;
+        case TargetLowering::Expand:
+          // f64 = EXTLOAD f32 should expand to LOAD, FP_EXTEND
+          if (SrcVT == MVT::f32 && Node->getValueType(0) == MVT::f64) {
+            SDOperand Load = DAG.getLoad(SrcVT, Tmp1, Tmp2, LD->getSrcValue(),
+                                         LD->getSrcValueOffset(),
+                                         LD->isVolatile(), LD->getAlignment());
+            Result = DAG.getNode(ISD::FP_EXTEND, Node->getValueType(0), Load);
+            Tmp1 = LegalizeOp(Result);  // Relegalize new nodes.
+            Tmp2 = LegalizeOp(Load.getValue(1));
+            break;
+          }
+          assert(ExtType != ISD::EXTLOAD &&"EXTLOAD should always be supported!");
+          // Turn the unsupported load into an EXTLOAD followed by an explicit
+          // zero/sign extend inreg.
+          Result = DAG.getExtLoad(ISD::EXTLOAD, Node->getValueType(0),
+                                  Tmp1, Tmp2, LD->getSrcValue(),
+                                  LD->getSrcValueOffset(), SrcVT,
+                                  LD->isVolatile(), LD->getAlignment());
+          SDOperand ValRes;
+          if (ExtType == ISD::SEXTLOAD)
+            ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, Result.getValueType(),
+                                 Result, DAG.getValueType(SrcVT));
+          else
+            ValRes = DAG.getZeroExtendInReg(Result, SrcVT);
+          Tmp1 = LegalizeOp(ValRes);  // Relegalize new nodes.
+          Tmp2 = LegalizeOp(Result.getValue(1));  // Relegalize new nodes.
           break;
         }
-        assert(ExtType != ISD::EXTLOAD &&"EXTLOAD should always be supported!");
-        // Turn the unsupported load into an EXTLOAD followed by an explicit
-        // zero/sign extend inreg.
-        Result = DAG.getExtLoad(ISD::EXTLOAD, Node->getValueType(0),
-                                Tmp1, Tmp2, LD->getSrcValue(),
-                                LD->getSrcValueOffset(), SrcVT,
-                                LD->isVolatile(), LD->getAlignment());
-        SDOperand ValRes;
-        if (ExtType == ISD::SEXTLOAD)
-          ValRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, Result.getValueType(),
-                               Result, DAG.getValueType(SrcVT));
-        else
-          ValRes = DAG.getZeroExtendInReg(Result, SrcVT);
-        Tmp1 = LegalizeOp(ValRes);  // Relegalize new nodes.
-        Tmp2 = LegalizeOp(Result.getValue(1));  // Relegalize new nodes.
-        break;
       }
+
       // Since loads produce two values, make sure to remember that we legalized
       // both of them.
       AddLegalizedOperand(SDOperand(Node, 0), Tmp1);
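
To make the width arithmetic in this hunk concrete, here is a small
standalone sketch (plain C++; log2_floor stands in for llvm::Log2_32, whose
floor-log2 behaviour is assumed) of both strategies, for an i20 and an i24
extending load:

  #include <cassert>

  // Floor log2, standing in for llvm::Log2_32.
  unsigned log2_floor(unsigned x) {
    unsigned r = 0;
    while (x >>= 1) ++r;
    return r;
  }

  int main() {
    // Strategy 1: round up to a whole number of bytes (EXTLOAD:i20 -> i24).
    unsigned NewWidth = (20 + 7) & ~7u;               // 24 bits = 3 bytes
    assert(NewWidth == 24);

    // Strategy 2: split a byte-sized, non-power-of-2 width into two loads.
    unsigned SrcWidth = 24;
    assert(SrcWidth & (SrcWidth - 1));                // not a power of 2
    unsigned RoundWidth = 1u << log2_floor(SrcWidth); // 16: power-of-2 part
    unsigned ExtraWidth = SrcWidth - RoundWidth;      //  8: the remainder
    assert(RoundWidth % 8 == 0 && ExtraWidth % 8 == 0);
    unsigned IncrementSize = RoundWidth / 8;          //  2: second load offset
    assert(IncrementSize == 2);
    return 0;
  }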

Modified: llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp?rev=46280&r1=46279&r2=46280&view=diff

==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp Wed Jan 23 14:39:46 2008
@@ -133,6 +133,9 @@
   // ARM does not have f32 extending load.
   setLoadXAction(ISD::EXTLOAD, MVT::f32, Expand);
 
+  // ARM does not have i1 sign extending load.
+  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
+
   // ARM supports all 4 flavors of integer indexed load / store.
   for (unsigned im = (unsigned)ISD::PRE_INC;
        im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
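
As a rough illustration of why Promote is the right action for i1 loads here
and in the other targets below (a hedged sketch, assuming the usual
convention that a stored bool occupies one byte holding 0 or 1; the function
names are invented for the example):

  #include <cstdint>

  // i1 ZEXTLOAD via i8: the stored byte already has its top 7 bits clear, so
  // an i8 zextload equals the zero-extended i1 value; this is the fact the
  // AssertZext annotation records for the optimizers.
  uint32_t zext_load_i1(const uint8_t *p) {
    return *p;                 // plain byte load, no masking needed
  }

  // i1 SEXTLOAD has no byte-load equivalent, so after promotion the
  // legalizer appends an explicit SIGN_EXTEND_INREG from i1:
  int32_t sext_load_i1(const uint8_t *p) {
    uint8_t b = *p;            // promoted i8 load
    return -(int32_t)(b & 1);  // sign-extend bit 0: 0 -> 0, 1 -> -1
  }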

Modified: llvm/trunk/lib/Target/IA64/IA64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/IA64/IA64ISelLowering.cpp?rev=46280&r1=46279&r2=46280&view=diff

==============================================================================
--- llvm/trunk/lib/Target/IA64/IA64ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/IA64/IA64ISelLowering.cpp Wed Jan 23 14:39:46 2008
@@ -37,9 +37,9 @@
 
       setLoadXAction(ISD::EXTLOAD          , MVT::i1   , Promote);
 
-      setLoadXAction(ISD::ZEXTLOAD         , MVT::i1   , Expand);
+      setLoadXAction(ISD::ZEXTLOAD         , MVT::i1   , Promote);
 
-      setLoadXAction(ISD::SEXTLOAD         , MVT::i1   , Expand);
+      setLoadXAction(ISD::SEXTLOAD         , MVT::i1   , Promote);
       setLoadXAction(ISD::SEXTLOAD         , MVT::i8   , Expand);
       setLoadXAction(ISD::SEXTLOAD         , MVT::i16  , Expand);
       setLoadXAction(ISD::SEXTLOAD         , MVT::i32  , Expand);

Modified: llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp?rev=46280&r1=46279&r2=46280&view=diff

==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp Wed Jan 23 14:39:46 2008
@@ -52,9 +52,9 @@
   addRegisterClass(MVT::f64, PPC::F8RCRegisterClass);
   
   // PowerPC has an i16 but no i8 (or i1) SEXTLOAD
-  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);
+  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
   setLoadXAction(ISD::SEXTLOAD, MVT::i8, Expand);
-    
+
   setTruncStoreAction(MVT::f64, MVT::f32, Expand);
     
   // PowerPC has pre-inc load and store's.

Modified: llvm/trunk/lib/Target/Sparc/SparcISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Sparc/SparcISelDAGToDAG.cpp?rev=46280&r1=46279&r2=46280&view=diff

==============================================================================
--- llvm/trunk/lib/Target/Sparc/SparcISelDAGToDAG.cpp (original)
+++ llvm/trunk/lib/Target/Sparc/SparcISelDAGToDAG.cpp Wed Jan 23 14:39:46 2008
@@ -138,7 +138,10 @@
 
   // Turn FP extload into load/fextend
   setLoadXAction(ISD::EXTLOAD, MVT::f32, Expand);
-  
+
+  // Sparc doesn't have i1 sign extending load
+  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
+
   // Custom legalize GlobalAddress nodes into LO/HI parts.
   setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
   setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);

Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=46280&r1=46279&r2=46280&view=diff

==============================================================================
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Wed Jan 23 14:39:46 2008
@@ -80,7 +80,7 @@
   if (Subtarget->is64Bit())
     addRegisterClass(MVT::i64, X86::GR64RegisterClass);
 
-  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Expand);
+  setLoadXAction(ISD::SEXTLOAD, MVT::i1, Promote);
 
   // We don't accept any truncstore of integer registers.  
   setTruncStoreAction(MVT::i64, MVT::i32, Expand);

Modified: llvm/trunk/lib/Target/X86/X86Instr64bit.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86Instr64bit.td?rev=46280&r1=46279&r2=46280&view=diff

==============================================================================
--- llvm/trunk/lib/Target/X86/X86Instr64bit.td (original)
+++ llvm/trunk/lib/Target/X86/X86Instr64bit.td Wed Jan 23 14:39:46 2008
@@ -61,7 +61,6 @@
   return (int64_t)N->getValue() == (int8_t)N->getValue();
 }]>;
 
-def sextloadi64i1  : PatFrag<(ops node:$ptr), (i64 (sextloadi1 node:$ptr))>;
 def sextloadi64i8  : PatFrag<(ops node:$ptr), (i64 (sextloadi8 node:$ptr))>;
 def sextloadi64i16 : PatFrag<(ops node:$ptr), (i64 (sextloadi16 node:$ptr))>;
 def sextloadi64i32 : PatFrag<(ops node:$ptr), (i64 (sextloadi32 node:$ptr))>;
@@ -1177,8 +1176,7 @@
 def : Pat<(parallel (X86cmp GR64:$src1, 0), (implicit EFLAGS)),
           (TEST64rr GR64:$src1, GR64:$src1)>;
 
-// {s|z}extload bool -> {s|z}extload byte
-def : Pat<(sextloadi64i1 addr:$src), (MOVSX64rm8 addr:$src)>;
+// zextload bool -> zextload byte
 def : Pat<(zextloadi64i1 addr:$src), (MOVZX64rm8 addr:$src)>;
 
 // extload

Modified: llvm/trunk/lib/Target/X86/X86InstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86InstrInfo.td?rev=46280&r1=46279&r2=46280&view=diff

==============================================================================
--- llvm/trunk/lib/Target/X86/X86InstrInfo.td (original)
+++ llvm/trunk/lib/Target/X86/X86InstrInfo.td Wed Jan 23 14:39:46 2008
@@ -226,8 +226,6 @@
 def loadf64 : PatFrag<(ops node:$ptr), (f64 (load node:$ptr))>;
 def loadf80 : PatFrag<(ops node:$ptr), (f80 (load node:$ptr))>;
 
-def sextloadi16i1  : PatFrag<(ops node:$ptr), (i16 (sextloadi1 node:$ptr))>;
-def sextloadi32i1  : PatFrag<(ops node:$ptr), (i32 (sextloadi1 node:$ptr))>;
 def sextloadi16i8  : PatFrag<(ops node:$ptr), (i16 (sextloadi8 node:$ptr))>;
 def sextloadi32i8  : PatFrag<(ops node:$ptr), (i32 (sextloadi8 node:$ptr))>;
 def sextloadi32i16 : PatFrag<(ops node:$ptr), (i32 (sextloadi16 node:$ptr))>;
@@ -2608,9 +2606,7 @@
 def : Pat<(parallel (X86cmp GR32:$src1, 0), (implicit EFLAGS)),
           (TEST32rr GR32:$src1, GR32:$src1)>;
 
-// {s|z}extload bool -> {s|z}extload byte
-def : Pat<(sextloadi16i1 addr:$src), (MOVSX16rm8 addr:$src)>;
-def : Pat<(sextloadi32i1 addr:$src), (MOVSX32rm8 addr:$src)>;
+// zextload bool -> zextload byte
 def : Pat<(zextloadi8i1  addr:$src), (MOV8rm     addr:$src)>;
 def : Pat<(zextloadi16i1 addr:$src), (MOVZX16rm8 addr:$src)>;
 def : Pat<(zextloadi32i1 addr:$src), (MOVZX32rm8 addr:$src)>;
