[llvm-commits] CVS: llvm/lib/Target/X86/InstSelectSimple.cpp
Chris Lattner
lattner at cs.uiuc.edu
Sun Mar 7 19:20:07 PST 2004
Changes in directory llvm/lib/Target/X86:
InstSelectSimple.cpp updated: 1.190 -> 1.191
---
Log message:
Rearrange and refactor some code. No functionality changes.
---
Diffs of the changes: (+96 -101)
Index: llvm/lib/Target/X86/InstSelectSimple.cpp
diff -u llvm/lib/Target/X86/InstSelectSimple.cpp:1.190 llvm/lib/Target/X86/InstSelectSimple.cpp:1.191
--- llvm/lib/Target/X86/InstSelectSimple.cpp:1.190 Mon Mar 1 17:53:11 2004
+++ llvm/lib/Target/X86/InstSelectSimple.cpp Sun Mar 7 19:18:36 2004
@@ -199,8 +199,14 @@
///
void promote32(unsigned targetReg, const ValueRecord &VR);
- // getGEPIndex - This is used to fold GEP instructions into X86 addressing
- // expressions.
+ /// getAddressingMode - Get the addressing mode to use to address the
+ /// specified value. The returned value should be used with addFullAddress.
+ void getAddressingMode(Value *Addr, unsigned &BaseReg, unsigned &Scale,
+ unsigned &IndexReg, unsigned &Disp);
+
+
+ /// getGEPIndex - This is used to fold GEP instructions into X86 addressing
+ /// expressions.
void getGEPIndex(MachineBasicBlock *MBB, MachineBasicBlock::iterator IP,
std::vector<Value*> &GEPOps,
std::vector<const Type*> &GEPTypes, unsigned &BaseReg,
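The new helper pairs with addFullAddress (X86InstrBuilder.h): the four out-parameters describe an x86 [BaseReg + Scale*IndexReg + Disp] operand. A minimal sketch of the intended call pattern, mirroring the new visitLoadInst body further down; the MOV32rm opcode here is illustrative, not taken from this patch:

    unsigned BaseReg, Scale, IndexReg, Disp;
    getAddressingMode(I.getOperand(0), BaseReg, Scale, IndexReg, Disp);
    // mov DestReg, dword ptr [BaseReg + Scale*IndexReg + Disp]
    addFullAddress(BuildMI(BB, X86::MOV32rm, 4, DestReg),
                   BaseReg, Scale, IndexReg, Disp);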
@@ -1416,8 +1422,9 @@
void ISel::visitSimpleBinary(BinaryOperator &B, unsigned OperatorClass) {
unsigned DestReg = getReg(B);
MachineBasicBlock::iterator MI = BB->end();
- emitSimpleBinaryOperation(BB, MI, B.getOperand(0), B.getOperand(1),
- OperatorClass, DestReg);
+ Value *Op0 = B.getOperand(0), *Op1 = B.getOperand(1);
+
+ emitSimpleBinaryOperation(BB, MI, Op0, Op1, OperatorClass, DestReg);
}
/// emitSimpleBinaryOperation - Implement simple binary operators for integral
@@ -1459,83 +1466,83 @@
return;
}
- if (!isa<ConstantInt>(Op1) || Class == cLong) {
- static const unsigned OpcodeTab[][4] = {
+ // Special case: op Reg, <const>
+ if (Class != cLong && isa<ConstantInt>(Op1)) {
+ ConstantInt *Op1C = cast<ConstantInt>(Op1);
+ unsigned Op0r = getReg(Op0, MBB, IP);
+
+ // xor X, -1 -> not X
+ if (OperatorClass == 4 && Op1C->isAllOnesValue()) {
+ static unsigned const NOTTab[] = { X86::NOT8r, X86::NOT16r, X86::NOT32r };
+ BuildMI(*MBB, IP, NOTTab[Class], 1, DestReg).addReg(Op0r);
+ return;
+ }
+
+ // add X, -1 -> dec X
+ if (OperatorClass == 0 && Op1C->isAllOnesValue()) {
+ static unsigned const DECTab[] = { X86::DEC8r, X86::DEC16r, X86::DEC32r };
+ BuildMI(*MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
+ return;
+ }
+
+ // add X, 1 -> inc X
+ if (OperatorClass == 0 && Op1C->equalsInt(1)) {
+ static unsigned const INCTab[] = { X86::INC8r, X86::INC16r, X86::INC32r };
+ BuildMI(*MBB, IP, INCTab[Class], 1, DestReg).addReg(Op0r);
+ return;
+ }
+
+ static const unsigned OpcodeTab[][3] = {
// Arithmetic operators
- { X86::ADD8rr, X86::ADD16rr, X86::ADD32rr, X86::FpADD }, // ADD
- { X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::FpSUB }, // SUB
-
+ { X86::ADD8ri, X86::ADD16ri, X86::ADD32ri }, // ADD
+ { X86::SUB8ri, X86::SUB16ri, X86::SUB32ri }, // SUB
+
// Bitwise operators
- { X86::AND8rr, X86::AND16rr, X86::AND32rr, 0 }, // AND
- { X86:: OR8rr, X86:: OR16rr, X86:: OR32rr, 0 }, // OR
- { X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0 }, // XOR
+ { X86::AND8ri, X86::AND16ri, X86::AND32ri }, // AND
+ { X86:: OR8ri, X86:: OR16ri, X86:: OR32ri }, // OR
+ { X86::XOR8ri, X86::XOR16ri, X86::XOR32ri }, // XOR
};
-
- bool isLong = false;
- if (Class == cLong) {
- isLong = true;
- Class = cInt; // Bottom 32 bits are handled just like ints
- }
-
+
+ assert(Class < cFP && "General code handles 64-bit integer types!");
unsigned Opcode = OpcodeTab[OperatorClass][Class];
- assert(Opcode && "Floating point arguments to logical inst?");
- unsigned Op0r = getReg(Op0, MBB, IP);
- unsigned Op1r = getReg(Op1, MBB, IP);
- BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);
-
- if (isLong) { // Handle the upper 32 bits of long values...
- static const unsigned TopTab[] = {
- X86::ADC32rr, X86::SBB32rr, X86::AND32rr, X86::OR32rr, X86::XOR32rr
- };
- BuildMI(*MBB, IP, TopTab[OperatorClass], 2,
- DestReg+1).addReg(Op0r+1).addReg(Op1r+1);
- }
- return;
- }
-
- // Special case: op Reg, <const>
- ConstantInt *Op1C = cast<ConstantInt>(Op1);
- unsigned Op0r = getReg(Op0, MBB, IP);
- // xor X, -1 -> not X
- if (OperatorClass == 4 && Op1C->isAllOnesValue()) {
- static unsigned const NOTTab[] = { X86::NOT8r, X86::NOT16r, X86::NOT32r };
- BuildMI(*MBB, IP, NOTTab[Class], 1, DestReg).addReg(Op0r);
- return;
- }
- // add X, -1 -> dec X
- if (OperatorClass == 0 && Op1C->isAllOnesValue()) {
- static unsigned const DECTab[] = { X86::DEC8r, X86::DEC16r, X86::DEC32r };
- BuildMI(*MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
+ uint64_t Op1v = cast<ConstantInt>(Op1C)->getRawValue();
+
+ // Mask off any upper bits of the constant, if there are any...
+ Op1v &= (1ULL << (8 << Class)) - 1;
+ BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1v);
return;
}
- // add X, 1 -> inc X
- if (OperatorClass == 0 && Op1C->equalsInt(1)) {
- static unsigned const DECTab[] = { X86::INC8r, X86::INC16r, X86::INC32r };
- BuildMI(*MBB, IP, DECTab[Class], 1, DestReg).addReg(Op0r);
- return;
- }
-
- static const unsigned OpcodeTab[][3] = {
+ // Finally, handle the general case now.
+ static const unsigned OpcodeTab[][4] = {
// Arithmetic operators
- { X86::ADD8ri, X86::ADD16ri, X86::ADD32ri }, // ADD
- { X86::SUB8ri, X86::SUB16ri, X86::SUB32ri }, // SUB
-
+ { X86::ADD8rr, X86::ADD16rr, X86::ADD32rr, X86::FpADD }, // ADD
+ { X86::SUB8rr, X86::SUB16rr, X86::SUB32rr, X86::FpSUB }, // SUB
+
// Bitwise operators
- { X86::AND8ri, X86::AND16ri, X86::AND32ri }, // AND
- { X86:: OR8ri, X86:: OR16ri, X86:: OR32ri }, // OR
- { X86::XOR8ri, X86::XOR16ri, X86::XOR32ri }, // XOR
+ { X86::AND8rr, X86::AND16rr, X86::AND32rr, 0 }, // AND
+ { X86:: OR8rr, X86:: OR16rr, X86:: OR32rr, 0 }, // OR
+ { X86::XOR8rr, X86::XOR16rr, X86::XOR32rr, 0 }, // XOR
};
-
- assert(Class < 3 && "General code handles 64-bit integer types!");
+
+ bool isLong = false;
+ if (Class == cLong) {
+ isLong = true;
+ Class = cInt; // Bottom 32 bits are handled just like ints
+ }
+
unsigned Opcode = OpcodeTab[OperatorClass][Class];
- uint64_t Op1v = cast<ConstantInt>(Op1C)->getRawValue();
-
- // Mask off any upper bits of the constant, if there are any...
- Op1v &= (1ULL << (8 << Class)) - 1;
- BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addImm(Op1v);
+ assert(Opcode && "Floating point arguments to logical inst?");
+ unsigned Op0r = getReg(Op0, MBB, IP);
+ unsigned Op1r = getReg(Op1, MBB, IP);
+ BuildMI(*MBB, IP, Opcode, 2, DestReg).addReg(Op0r).addReg(Op1r);
+
+ if (isLong) { // Handle the upper 32 bits of long values...
+ static const unsigned TopTab[] = {
+ X86::ADC32rr, X86::SBB32rr, X86::AND32rr, X86::OR32rr, X86::XOR32rr
+ };
+ BuildMI(*MBB, IP, TopTab[OperatorClass], 2,
+ DestReg+1).addReg(Op0r+1).addReg(Op1r+1);
+ }
}
/// doMultiply - Emit appropriate instructions to multiply together the
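The rearranged block above tries three peepholes (xor X, -1 -> not; add X, -1 -> dec; add X, 1 -> inc) before falling through to the table-driven reg/imm form, masking the immediate to the operand width. A self-contained sketch of that decision logic; pickForm and maskToClass are hypothetical names, not from the patch:

    #include <cstdint>
    #include <cstdio>

    // Operator classes as numbered in the file: 0=add, 1=sub, 2=and, 3=or, 4=xor.
    const char *pickForm(unsigned OperatorClass, int64_t Imm) {
      if (OperatorClass == 4 && Imm == -1) return "not";  // xor X, -1 -> not X
      if (OperatorClass == 0 && Imm == -1) return "dec";  // add X, -1 -> dec X
      if (OperatorClass == 0 && Imm ==  1) return "inc";  // add X, 1  -> inc X
      return "op reg, imm";                               // generic ri form
    }

    // Size classes 0/1/2 (cByte/cShort/cInt), so 8 << Class is the bit width.
    uint64_t maskToClass(uint64_t V, unsigned Class) {
      return V & ((1ULL << (8 << Class)) - 1);
    }

    int main() {
      std::printf("%s\n", pickForm(4, -1));                             // not
      std::printf("%llx\n", (unsigned long long)maskToClass(~0ULL, 1)); // ffff
    }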
@@ -1904,30 +1911,34 @@
}
-/// visitLoadInst - Implement LLVM load instructions in terms of the x86 'mov'
-/// instruction. The load and store instructions are the only place where we
-/// need to worry about the memory layout of the target machine.
-///
-void ISel::visitLoadInst(LoadInst &I) {
- unsigned DestReg = getReg(I);
- unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
- Value *Addr = I.getOperand(0);
+void ISel::getAddressingMode(Value *Addr, unsigned &BaseReg, unsigned &Scale,
+ unsigned &IndexReg, unsigned &Disp) {
+ BaseReg = 0; Scale = 1; IndexReg = 0; Disp = 0;
if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr)) {
if (isGEPFoldable(BB, GEP->getOperand(0), GEP->op_begin()+1, GEP->op_end(),
BaseReg, Scale, IndexReg, Disp))
- Addr = 0; // Address is consumed!
+ return;
} else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
if (CE->getOpcode() == Instruction::GetElementPtr)
if (isGEPFoldable(BB, CE->getOperand(0), CE->op_begin()+1, CE->op_end(),
BaseReg, Scale, IndexReg, Disp))
- Addr = 0;
+ return;
}
- if (Addr) {
- // If it's not foldable, reset addr mode.
- BaseReg = getReg(Addr);
- Scale = 1; IndexReg = 0; Disp = 0;
- }
+ // If it's not foldable, reset addr mode.
+ BaseReg = getReg(Addr);
+ Scale = 1; IndexReg = 0; Disp = 0;
+}
+
+
+/// visitLoadInst - Implement LLVM load instructions in terms of the x86 'mov'
+/// instruction. The load and store instructions are the only place where we
+/// need to worry about the memory layout of the target machine.
+///
+void ISel::visitLoadInst(LoadInst &I) {
+ unsigned DestReg = getReg(I);
+ unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
+ getAddressingMode(I.getOperand(0), BaseReg, Scale, IndexReg, Disp);
unsigned Class = getClassB(I.getType());
if (Class == cLong) {
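getAddressingMode boils any pointer down to the four components of an x86 memory operand: effective address = BaseReg + Scale*IndexReg + Disp. When isGEPFoldable succeeds, the GEP is absorbed into those components; otherwise the pointer simply becomes the base register. A toy evaluator of the form, for illustration (effectiveAddr is a hypothetical name):

    #include <cstdint>

    // x86 restricts Scale to 1, 2, 4, or 8; Disp is a signed displacement.
    uint64_t effectiveAddr(uint64_t Base, unsigned Scale,
                           uint64_t Index, int32_t Disp) {
      return Base + (uint64_t)Scale * Index + (int64_t)Disp;
    }

For p[i] with 4-byte elements, folding yields Base = p, Scale = 4, Index = i, Disp = 0.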
@@ -1951,24 +1962,8 @@
/// instruction.
///
void ISel::visitStoreInst(StoreInst &I) {
- unsigned BaseReg = 0, Scale = 1, IndexReg = 0, Disp = 0;
- Value *Addr = I.getOperand(1);
- if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Addr)) {
- if (isGEPFoldable(BB, GEP->getOperand(0), GEP->op_begin()+1, GEP->op_end(),
- BaseReg, Scale, IndexReg, Disp))
- Addr = 0; // Address is consumed!
- } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
- if (CE->getOpcode() == Instruction::GetElementPtr)
- if (isGEPFoldable(BB, CE->getOperand(0), CE->op_begin()+1, CE->op_end(),
- BaseReg, Scale, IndexReg, Disp))
- Addr = 0;
- }
-
- if (Addr) {
- // If it's not foldable, reset addr mode.
- BaseReg = getReg(Addr);
- Scale = 1; IndexReg = 0; Disp = 0;
- }
+ unsigned BaseReg, Scale, IndexReg, Disp;
+ getAddressingMode(I.getOperand(1), BaseReg, Scale, IndexReg, Disp);
const Type *ValTy = I.getOperand(0)->getType();
unsigned Class = getClassB(ValTy);
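Throughout this file a cLong value lives in a register pair: DestReg holds the low 32 bits and DestReg+1 the high 32, with ADD/SUB on the low half chained to ADC/SBB on the high half (the TopTab above). A self-contained model of that carry chain; add64 is a hypothetical name:

    #include <cstdint>

    void add64(uint32_t ALo, uint32_t AHi, uint32_t BLo, uint32_t BHi,
               uint32_t &Lo, uint32_t &Hi) {
      Lo = ALo + BLo;            // ADD32rr: wraps mod 2^32, setting carry
      unsigned Carry = Lo < ALo; // model of CF after the low-half add
      Hi = AHi + BHi + Carry;    // ADC32rr: folds the carry into the high half
    }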