[llvm-commits] CVS: llvm/lib/Target/X86/InstSelectSimple.cpp PeepholeOptimizer.cpp Printer.cpp X86InstrInfo.td
Chris Lattner
lattner at cs.uiuc.edu
Tue Feb 17 00:17:01 PST 2004
Changes in directory llvm/lib/Target/X86:
InstSelectSimple.cpp updated: 1.161 -> 1.162
PeepholeOptimizer.cpp updated: 1.19 -> 1.20
Printer.cpp updated: 1.83 -> 1.84
X86InstrInfo.td updated: 1.27 -> 1.28
---
Log message:
Rename MOVi[mr] instructions to MOV[rm]i
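The renamed opcodes put the destination first: MOVri loads an immediate into a register (Pattern<(set R, imm)>), and MOVmi stores an immediate to memory ([mem] = imm), as the X86InstrInfo.td comments below spell out. A minimal sketch of emitting the renamed instructions, in the style of the BuildMI/BMI calls in this patch (the Reg and FrameIdx values here are placeholders, not taken from the patch):

    // Register destination, immediate source: the new "ri" suffix
    // (previously MOVir32).
    BuildMI(BB, X86::MOVri32, 1, Reg).addZImm(0);

    // Memory destination (a frame slot), immediate source: the new "mi"
    // suffix (previously MOVim8).
    addFrameReference(BuildMI(BB, X86::MOVmi8, 5), FrameIdx).addZImm(12);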
---
Diffs of the changes: (+42 -36)
Index: llvm/lib/Target/X86/InstSelectSimple.cpp
diff -u llvm/lib/Target/X86/InstSelectSimple.cpp:1.161 llvm/lib/Target/X86/InstSelectSimple.cpp:1.162
--- llvm/lib/Target/X86/InstSelectSimple.cpp:1.161 Mon Feb 16 22:26:43 2004
+++ llvm/lib/Target/X86/InstSelectSimple.cpp Tue Feb 17 00:16:44 2004
@@ -306,7 +306,7 @@
RegMap.erase(V); // Assign a new name to this constant if ref'd again
} else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
// Move the address of the global into the register
- BMI(MBB, IPt, X86::MOVir32, 1, Reg).addGlobalAddress(GV);
+ BMI(MBB, IPt, X86::MOVri32, 1, Reg).addGlobalAddress(GV);
RegMap.erase(V); // Assign a new name to this address if ref'd again
}
@@ -423,19 +423,19 @@
if (Class == cLong) {
// Copy the value into the register pair.
uint64_t Val = cast<ConstantInt>(C)->getRawValue();
- BMI(MBB, IP, X86::MOVir32, 1, R).addZImm(Val & 0xFFFFFFFF);
- BMI(MBB, IP, X86::MOVir32, 1, R+1).addZImm(Val >> 32);
+ BMI(MBB, IP, X86::MOVri32, 1, R).addZImm(Val & 0xFFFFFFFF);
+ BMI(MBB, IP, X86::MOVri32, 1, R+1).addZImm(Val >> 32);
return;
}
assert(Class <= cInt && "Type not handled yet!");
static const unsigned IntegralOpcodeTab[] = {
- X86::MOVir8, X86::MOVir16, X86::MOVir32
+ X86::MOVri8, X86::MOVri16, X86::MOVri32
};
if (C->getType() == Type::BoolTy) {
- BMI(MBB, IP, X86::MOVir8, 1, R).addZImm(C == ConstantBool::True);
+ BMI(MBB, IP, X86::MOVri8, 1, R).addZImm(C == ConstantBool::True);
} else {
ConstantInt *CI = cast<ConstantInt>(C);
BMI(MBB, IP, IntegralOpcodeTab[Class], 1, R).addZImm(CI->getRawValue());
@@ -458,7 +458,7 @@
} else if (isa<ConstantPointerNull>(C)) {
// Copy zero (null pointer) to the register.
- BMI(MBB, IP, X86::MOVir32, 1, R).addZImm(0);
+ BMI(MBB, IP, X86::MOVri32, 1, R).addZImm(0);
} else if (ConstantPointerRef *CPR = dyn_cast<ConstantPointerRef>(C)) {
unsigned SrcReg = getReg(CPR->getValue(), MBB, IP);
BMI(MBB, IP, X86::MOVrr32, 1, R).addReg(SrcReg);
@@ -1211,7 +1211,7 @@
}
} else {
// Values other than zero are not implemented yet.
- BuildMI(BB, X86::MOVir32, 1, TmpReg1).addZImm(0);
+ BuildMI(BB, X86::MOVri32, 1, TmpReg1).addZImm(0);
}
return;
@@ -1287,7 +1287,7 @@
CountReg = makeAnotherReg(Type::IntTy);
BuildMI(BB, X86::SHRir32, 2, CountReg).addReg(ByteReg).addZImm(1);
}
- BuildMI(BB, X86::MOVir16, 1, X86::AX).addZImm((Val << 8) | Val);
+ BuildMI(BB, X86::MOVri16, 1, X86::AX).addZImm((Val << 8) | Val);
Opcode = X86::REP_STOSW;
break;
case 0: // DWORD aligned
@@ -1298,13 +1298,13 @@
BuildMI(BB, X86::SHRir32, 2, CountReg).addReg(ByteReg).addZImm(2);
}
Val = (Val << 8) | Val;
- BuildMI(BB, X86::MOVir32, 1, X86::EAX).addZImm((Val << 16) | Val);
+ BuildMI(BB, X86::MOVri32, 1, X86::EAX).addZImm((Val << 16) | Val);
Opcode = X86::REP_STOSD;
break;
case 1: // BYTE aligned
case 3: // BYTE aligned
CountReg = getReg(CI.getOperand(3));
- BuildMI(BB, X86::MOVir8, 1, X86::AL).addZImm(Val);
+ BuildMI(BB, X86::MOVri8, 1, X86::AL).addZImm(Val);
Opcode = X86::REP_STOSB;
break;
}
@@ -1532,12 +1532,12 @@
}
// Most general case, emit a normal multiply...
- static const unsigned MOVirTab[] = {
- X86::MOVir8, X86::MOVir16, X86::MOVir32
+ static const unsigned MOVriTab[] = {
+ X86::MOVri8, X86::MOVri16, X86::MOVri32
};
unsigned TmpReg = makeAnotherReg(DestTy);
- BMI(MBB, IP, MOVirTab[Class], 1, TmpReg).addZImm(ConstRHS);
+ BMI(MBB, IP, MOVriTab[Class], 1, TmpReg).addZImm(ConstRHS);
// Emit a MUL to multiply the register holding the index by
// elementSize, putting the result in OffsetReg.
@@ -1647,7 +1647,7 @@
static const unsigned Regs[] ={ X86::AL , X86::AX , X86::EAX };
static const unsigned MovOpcode[]={ X86::MOVrr8, X86::MOVrr16, X86::MOVrr32 };
static const unsigned SarOpcode[]={ X86::SARir8, X86::SARir16, X86::SARir32 };
- static const unsigned ClrOpcode[]={ X86::MOVir8, X86::MOVir16, X86::MOVir32 };
+ static const unsigned ClrOpcode[]={ X86::MOVri8, X86::MOVri16, X86::MOVri32 };
static const unsigned ExtRegs[] ={ X86::AH , X86::DX , X86::EDX };
static const unsigned DivOpcode[][4] = {
@@ -1742,12 +1742,12 @@
if (isLeftShift) {
BMI(MBB, IP, X86::SHLir32, 2,
DestReg + 1).addReg(SrcReg).addZImm(Amount);
- BMI(MBB, IP, X86::MOVir32, 1,
+ BMI(MBB, IP, X86::MOVri32, 1,
DestReg).addZImm(0);
} else {
unsigned Opcode = isSigned ? X86::SARir32 : X86::SHRir32;
BMI(MBB, IP, Opcode, 2, DestReg).addReg(SrcReg+1).addZImm(Amount);
- BMI(MBB, IP, X86::MOVir32, 1, DestReg+1).addZImm(0);
+ BMI(MBB, IP, X86::MOVri32, 1, DestReg+1).addZImm(0);
}
}
} else {
@@ -1761,7 +1761,7 @@
} else {
// Other shifts use a fixed zero value if the shift is more than 32
// bits.
- BMI(MBB, IP, X86::MOVir32, 1, TmpReg).addZImm(0);
+ BMI(MBB, IP, X86::MOVri32, 1, TmpReg).addZImm(0);
}
// Initialize CL with the shift amount...
@@ -1989,7 +1989,7 @@
if (isLong) { // Handle upper 32 bits as appropriate...
if (isUnsigned) // Zero out top bits...
- BMI(BB, IP, X86::MOVir32, 1, DestReg+1).addZImm(0);
+ BMI(BB, IP, X86::MOVri32, 1, DestReg+1).addZImm(0);
else // Sign extend bottom half...
BMI(BB, IP, X86::SARir32, 2, DestReg+1).addReg(DestReg).addZImm(31);
}
@@ -2040,7 +2040,7 @@
// Make a 64 bit temporary... and zero out the top of it...
unsigned TmpReg = makeAnotherReg(Type::LongTy);
BMI(BB, IP, X86::MOVrr32, 1, TmpReg).addReg(SrcReg);
- BMI(BB, IP, X86::MOVir32, 1, TmpReg+1).addZImm(0);
+ BMI(BB, IP, X86::MOVri32, 1, TmpReg+1).addZImm(0);
SrcTy = Type::LongTy;
SrcClass = cLong;
SrcReg = TmpReg;
@@ -2093,7 +2093,7 @@
addFrameReference(BMI(BB, IP, X86::MOVmr8, 4, HighPartOfCW), CWFrameIdx, 1);
// Set the high part to be round to zero...
- addFrameReference(BMI(BB, IP, X86::MOVim8, 5), CWFrameIdx, 1).addZImm(12);
+ addFrameReference(BMI(BB, IP, X86::MOVmi8, 5), CWFrameIdx, 1).addZImm(12);
// Reload the modified control word now...
addFrameReference(BMI(BB, IP, X86::FLDCWm16, 4), CWFrameIdx);
Index: llvm/lib/Target/X86/PeepholeOptimizer.cpp
diff -u llvm/lib/Target/X86/PeepholeOptimizer.cpp:1.19 llvm/lib/Target/X86/PeepholeOptimizer.cpp:1.20
--- llvm/lib/Target/X86/PeepholeOptimizer.cpp:1.19 Tue Feb 17 00:02:15 2004
+++ llvm/lib/Target/X86/PeepholeOptimizer.cpp Tue Feb 17 00:16:44 2004
@@ -179,9 +179,9 @@
return false;
#if 0
- case X86::MOVir32: Size++;
- case X86::MOVir16: Size++;
- case X86::MOVir8:
+ case X86::MOVri32: Size++;
+ case X86::MOVri16: Size++;
+ case X86::MOVri8:
// FIXME: We can only do this transformation if we know that flags are not
// used here, because XOR clobbers the flags!
if (MI->getOperand(1).isImmediate()) { // avoid mov EAX, <value>
@@ -373,7 +373,7 @@
// Attempt to fold instructions used by the base register into the instruction
if (MachineInstr *DefInst = getDefiningInst(BaseRegOp)) {
switch (DefInst->getOpcode()) {
- case X86::MOVir32:
+ case X86::MOVri32:
// If there is no displacement set for this instruction set one now.
// FIXME: If we can fold two immediates together, we should do so!
if (DisplacementOp.isImmediate() && !DisplacementOp.getImmedValue()) {
@@ -461,14 +461,14 @@
// Register to memory stores. Format: <base,scale,indexreg,immdisp>, srcreg
case X86::MOVrm32: case X86::MOVrm16: case X86::MOVrm8:
- case X86::MOVim32: case X86::MOVim16: case X86::MOVim8:
+ case X86::MOVmi32: case X86::MOVmi16: case X86::MOVmi8:
// Check to see if we can fold the source instruction into this one...
if (MachineInstr *SrcInst = getDefiningInst(MI->getOperand(4))) {
switch (SrcInst->getOpcode()) {
// Fold the immediate value into the store, if possible.
- case X86::MOVir8: return Propagate(MI, 4, SrcInst, 1, X86::MOVim8);
- case X86::MOVir16: return Propagate(MI, 4, SrcInst, 1, X86::MOVim16);
- case X86::MOVir32: return Propagate(MI, 4, SrcInst, 1, X86::MOVim32);
+ case X86::MOVri8: return Propagate(MI, 4, SrcInst, 1, X86::MOVmi8);
+ case X86::MOVri16: return Propagate(MI, 4, SrcInst, 1, X86::MOVmi16);
+ case X86::MOVri32: return Propagate(MI, 4, SrcInst, 1, X86::MOVmi32);
default: break;
}
}
Index: llvm/lib/Target/X86/Printer.cpp
diff -u llvm/lib/Target/X86/Printer.cpp:1.83 llvm/lib/Target/X86/Printer.cpp:1.84
--- llvm/lib/Target/X86/Printer.cpp:1.83 Mon Feb 16 22:26:43 2004
+++ llvm/lib/Target/X86/Printer.cpp Tue Feb 17 00:16:44 2004
@@ -642,13 +642,19 @@
// These instructions are the same as MRMDestReg, but instead of having a
// register reference for the mod/rm field, it's a memory reference.
//
- assert(isMem(MI, 0) && MI->getNumOperands() == 4+1 &&
- MI->getOperand(4).isRegister() && "Bad format for MRMDestMem!");
+ assert(isMem(MI, 0) &&
+ (MI->getNumOperands() == 4+1 ||
+ (MI->getNumOperands() == 4+2 && MI->getOperand(5).isImmediate()))
+ && "Bad format for MRMDestMem!");
O << TII.getName(MI->getOpcode()) << " " << sizePtr(Desc) << " ";
printMemReference(MI, 0);
O << ", ";
printOp(MI->getOperand(4));
+ if (MI->getNumOperands() == 4+2) {
+ O << ", ";
+ printOp(MI->getOperand(5));
+ }
O << "\n";
return;
}
Index: llvm/lib/Target/X86/X86InstrInfo.td
diff -u llvm/lib/Target/X86/X86InstrInfo.td:1.27 llvm/lib/Target/X86/X86InstrInfo.td:1.28
--- llvm/lib/Target/X86/X86InstrInfo.td:1.27 Mon Feb 16 23:25:50 2004
+++ llvm/lib/Target/X86/X86InstrInfo.td Tue Feb 17 00:16:44 2004
@@ -195,12 +195,12 @@
def MOVrr8 : X86Inst<"mov", 0x88, MRMDestReg, Arg8>, Pattern<(set R8 , R8 )>;
def MOVrr16 : X86Inst<"mov", 0x89, MRMDestReg, Arg16>, OpSize, Pattern<(set R16, R16)>;
def MOVrr32 : X86Inst<"mov", 0x89, MRMDestReg, Arg32>, Pattern<(set R32, R32)>;
-def MOVir8 : X86Inst<"mov", 0xB0, AddRegFrm , Arg8>, Pattern<(set R8 , imm )>;
-def MOVir16 : X86Inst<"mov", 0xB8, AddRegFrm , Arg16>, OpSize, Pattern<(set R16, imm)>;
-def MOVir32 : X86Inst<"mov", 0xB8, AddRegFrm , Arg32>, Pattern<(set R32, imm)>;
-def MOVim8 : X86Inst<"mov", 0xC6, MRMS0m , Arg8>; // [mem] = imm8
-def MOVim16 : X86Inst<"mov", 0xC7, MRMS0m , Arg16>, OpSize; // [mem] = imm16
-def MOVim32 : X86Inst<"mov", 0xC7, MRMS0m , Arg32>; // [mem] = imm32
+def MOVri8 : X86Inst<"mov", 0xB0, AddRegFrm , Arg8>, Pattern<(set R8 , imm )>;
+def MOVri16 : X86Inst<"mov", 0xB8, AddRegFrm , Arg16>, OpSize, Pattern<(set R16, imm)>;
+def MOVri32 : X86Inst<"mov", 0xB8, AddRegFrm , Arg32>, Pattern<(set R32, imm)>;
+def MOVmi8 : X86Inst<"mov", 0xC6, MRMS0m , Arg8>; // [mem] = imm8
+def MOVmi16 : X86Inst<"mov", 0xC7, MRMS0m , Arg16>, OpSize; // [mem] = imm16
+def MOVmi32 : X86Inst<"mov", 0xC7, MRMS0m , Arg32>; // [mem] = imm32
def MOVmr8 : X86Inst<"mov", 0x8A, MRMSrcMem , Arg8>; // R8 = [mem]
def MOVmr16 : X86Inst<"mov", 0x8B, MRMSrcMem , Arg16>, OpSize, // R16 = [mem]