[llvm] e981a46 - [VE] Update lea/load/store instructions
Simon Moll via llvm-commits
llvm-commits at lists.llvm.org
Mon Apr 6 02:50:37 PDT 2020
Author: Kazushi (Jam) Marukawa
Date: 2020-04-06T11:49:46+02:00
New Revision: e981a46a772f598709f08f28021dbdf40c8b65ea
URL: https://github.com/llvm/llvm-project/commit/e981a46a772f598709f08f28021dbdf40c8b65ea
DIFF: https://github.com/llvm/llvm-project/commit/e981a46a772f598709f08f28021dbdf40c8b65ea.diff
LOG: [VE] Update lea/load/store instructions
Summary:
Modify the lea/load/store instructions to accept the `disp(index, base)`
style addressing mode (called the ASX format). Also make the DAG nodes
for these ASX-format instructions uniformly carry three operands, and
update the selectADDR functions so they lower to the appropriate MI.
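For illustration only (hand-written VE assembly in the spirit of the test
updates below, not taken from the patch), the accepted ASX operand forms
look like:
  ld  %s0, 8(%s2, %s1)   # disp=8, index=%s2, base=%s1
  ld  %s0, 8(, %s1)      # index omitted: disp(, base)
  lea %s0, 5(, %s0)      # lea with displacement and base only
  st  %s0, 240(, %s11)   # store to disp(, base)
In MI form each such memory reference is now always three operands
(base, index, disp), with an explicit 0 standing in for an absent
register or a zero displacement.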
Reviewers: arsenm, simoll, k-ishizaka
Reviewed By: simoll
Differential Revision: https://reviews.llvm.org/D76822
Added:
Modified:
llvm/lib/Target/VE/MCTargetDesc/VEInstPrinter.cpp
llvm/lib/Target/VE/MCTargetDesc/VEInstPrinter.h
llvm/lib/Target/VE/VEAsmPrinter.cpp
llvm/lib/Target/VE/VEFrameLowering.cpp
llvm/lib/Target/VE/VEISelDAGToDAG.cpp
llvm/lib/Target/VE/VEInstrInfo.cpp
llvm/lib/Target/VE/VEInstrInfo.td
llvm/lib/Target/VE/VERegisterInfo.cpp
llvm/test/CodeGen/VE/addition.ll
llvm/test/CodeGen/VE/branch1.ll
llvm/test/CodeGen/VE/call.ll
llvm/test/CodeGen/VE/callee.ll
llvm/test/CodeGen/VE/callstruct.ll
llvm/test/CodeGen/VE/cast.ll
llvm/test/CodeGen/VE/constants.ll
llvm/test/CodeGen/VE/ctlz.ll
llvm/test/CodeGen/VE/cttz.ll
llvm/test/CodeGen/VE/fp_add.ll
llvm/test/CodeGen/VE/fp_div.ll
llvm/test/CodeGen/VE/fp_extload_truncstore.ll
llvm/test/CodeGen/VE/fp_mul.ll
llvm/test/CodeGen/VE/fp_sub.ll
llvm/test/CodeGen/VE/int_to_fp.ll
llvm/test/CodeGen/VE/load-align1.ll
llvm/test/CodeGen/VE/load-align2.ll
llvm/test/CodeGen/VE/load-align4.ll
llvm/test/CodeGen/VE/load-align8.ll
llvm/test/CodeGen/VE/load.ll
llvm/test/CodeGen/VE/load_gv.ll
llvm/test/CodeGen/VE/pic_access_data.ll
llvm/test/CodeGen/VE/pic_access_static_data.ll
llvm/test/CodeGen/VE/pic_indirect_func_call.ll
llvm/test/CodeGen/VE/sext_zext_load.ll
llvm/test/CodeGen/VE/simple_prologue_epilogue.ll
llvm/test/CodeGen/VE/store-align1.ll
llvm/test/CodeGen/VE/store-align2.ll
llvm/test/CodeGen/VE/store-align4.ll
llvm/test/CodeGen/VE/store-align8.ll
llvm/test/CodeGen/VE/store.ll
llvm/test/CodeGen/VE/store_gv.ll
llvm/test/CodeGen/VE/subtraction.ll
llvm/test/CodeGen/VE/tls.ll
llvm/test/CodeGen/VE/truncstore.ll
llvm/test/CodeGen/VE/va_arg.ll
llvm/test/CodeGen/VE/va_callee.ll
llvm/test/CodeGen/VE/va_caller.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/VE/MCTargetDesc/VEInstPrinter.cpp b/llvm/lib/Target/VE/MCTargetDesc/VEInstPrinter.cpp
index 27dc3a0a000a..09f6f0d6e67d 100644
--- a/llvm/lib/Target/VE/MCTargetDesc/VEInstPrinter.cpp
+++ b/llvm/lib/Target/VE/MCTargetDesc/VEInstPrinter.cpp
@@ -47,9 +47,9 @@ void VEInstPrinter::printInst(const MCInst *MI, uint64_t Address,
printAnnotation(OS, Annot);
}
-void VEInstPrinter::printOperand(const MCInst *MI, int opNum,
+void VEInstPrinter::printOperand(const MCInst *MI, int OpNum,
const MCSubtargetInfo &STI, raw_ostream &O) {
- const MCOperand &MO = MI->getOperand(opNum);
+ const MCOperand &MO = MI->getOperand(OpNum);
if (MO.isReg()) {
printRegName(O, MO.getReg());
@@ -70,48 +70,103 @@ void VEInstPrinter::printOperand(const MCInst *MI, int opNum,
MO.getExpr()->print(O, &MAI);
}
-void VEInstPrinter::printMemASXOperand(const MCInst *MI, int opNum,
+void VEInstPrinter::printMemASXOperand(const MCInst *MI, int OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O, const char *Modifier) {
// If this is an ADD operand, emit it like normal operands.
if (Modifier && !strcmp(Modifier, "arith")) {
- printOperand(MI, opNum, STI, O);
+ printOperand(MI, OpNum, STI, O);
O << ", ";
- printOperand(MI, opNum + 1, STI, O);
+ printOperand(MI, OpNum + 1, STI, O);
return;
}
- const MCOperand &MO = MI->getOperand(opNum + 1);
- if (!MO.isImm() || MO.getImm() != 0) {
- printOperand(MI, opNum + 1, STI, O);
+ if (MI->getOperand(OpNum + 2).isImm() &&
+ MI->getOperand(OpNum + 2).getImm() == 0) {
+ // don't print "+0"
+ } else {
+ printOperand(MI, OpNum + 2, STI, O);
+ }
+ if (MI->getOperand(OpNum + 1).isImm() &&
+ MI->getOperand(OpNum + 1).getImm() == 0 &&
+ MI->getOperand(OpNum).isImm() && MI->getOperand(OpNum).getImm() == 0) {
+ if (MI->getOperand(OpNum + 2).isImm() &&
+ MI->getOperand(OpNum + 2).getImm() == 0) {
+ O << "0";
+ } else {
+ // don't print "+0,+0"
+ }
+ } else {
+ O << "(";
+ if (MI->getOperand(OpNum + 1).isImm() &&
+ MI->getOperand(OpNum + 1).getImm() == 0) {
+ // don't print "+0"
+ } else {
+ printOperand(MI, OpNum + 1, STI, O);
+ }
+ if (MI->getOperand(OpNum).isImm() && MI->getOperand(OpNum).getImm() == 0) {
+ // don't print "+0"
+ } else {
+ O << ", ";
+ printOperand(MI, OpNum, STI, O);
+ }
+ O << ")";
+ }
+}
+
+void VEInstPrinter::printMemASOperandASX(const MCInst *MI, int OpNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O, const char *Modifier) {
+ // If this is an ADD operand, emit it like normal operands.
+ if (Modifier && !strcmp(Modifier, "arith")) {
+ printOperand(MI, OpNum, STI, O);
+ O << ", ";
+ printOperand(MI, OpNum + 1, STI, O);
+ return;
+ }
+
+ if (MI->getOperand(OpNum + 1).isImm() &&
+ MI->getOperand(OpNum + 1).getImm() == 0) {
+ // don't print "+0"
+ } else {
+ printOperand(MI, OpNum + 1, STI, O);
+ }
+ if (MI->getOperand(OpNum).isImm() && MI->getOperand(OpNum).getImm() == 0) {
+ if (MI->getOperand(OpNum + 1).isImm() &&
+ MI->getOperand(OpNum + 1).getImm() == 0) {
+ O << "0";
+ } else {
+ // don't print "(0)"
+ }
+ } else {
+ O << "(, ";
+ printOperand(MI, OpNum, STI, O);
+ O << ")";
}
- O << "(,";
- printOperand(MI, opNum, STI, O);
- O << ")";
}
-void VEInstPrinter::printMemASOperand(const MCInst *MI, int opNum,
+void VEInstPrinter::printMemASOperand(const MCInst *MI, int OpNum,
const MCSubtargetInfo &STI,
raw_ostream &O, const char *Modifier) {
// If this is an ADD operand, emit it like normal operands.
if (Modifier && !strcmp(Modifier, "arith")) {
- printOperand(MI, opNum, STI, O);
+ printOperand(MI, OpNum, STI, O);
O << ", ";
- printOperand(MI, opNum + 1, STI, O);
+ printOperand(MI, OpNum + 1, STI, O);
return;
}
- const MCOperand &MO = MI->getOperand(opNum + 1);
+ const MCOperand &MO = MI->getOperand(OpNum + 1);
if (!MO.isImm() || MO.getImm() != 0) {
- printOperand(MI, opNum + 1, STI, O);
+ printOperand(MI, OpNum + 1, STI, O);
}
O << "(";
- printOperand(MI, opNum, STI, O);
+ printOperand(MI, OpNum, STI, O);
O << ")";
}
-void VEInstPrinter::printCCOperand(const MCInst *MI, int opNum,
+void VEInstPrinter::printCCOperand(const MCInst *MI, int OpNum,
const MCSubtargetInfo &STI, raw_ostream &O) {
- int CC = (int)MI->getOperand(opNum).getImm();
+ int CC = (int)MI->getOperand(OpNum).getImm();
O << VECondCodeToString((VECC::CondCode)CC);
}
diff --git a/llvm/lib/Target/VE/MCTargetDesc/VEInstPrinter.h b/llvm/lib/Target/VE/MCTargetDesc/VEInstPrinter.h
index 90c9f7b71750..33d0a22f2f8f 100644
--- a/llvm/lib/Target/VE/MCTargetDesc/VEInstPrinter.h
+++ b/llvm/lib/Target/VE/MCTargetDesc/VEInstPrinter.h
@@ -34,15 +34,18 @@ class VEInstPrinter : public MCInstPrinter {
raw_ostream &);
static const char *getRegisterName(unsigned RegNo);
- void printOperand(const MCInst *MI, int opNum, const MCSubtargetInfo &STI,
+ void printOperand(const MCInst *MI, int OpNum, const MCSubtargetInfo &STI,
raw_ostream &OS);
- void printMemASXOperand(const MCInst *MI, int opNum,
+ void printMemASXOperand(const MCInst *MI, int OpNum,
const MCSubtargetInfo &STI, raw_ostream &OS,
const char *Modifier = nullptr);
- void printMemASOperand(const MCInst *MI, int opNum,
+ void printMemASOperandASX(const MCInst *MI, int OpNum,
+ const MCSubtargetInfo &STI, raw_ostream &OS,
+ const char *Modifier = nullptr);
+ void printMemASOperand(const MCInst *MI, int OpNum,
const MCSubtargetInfo &STI, raw_ostream &OS,
const char *Modifier = nullptr);
- void printCCOperand(const MCInst *MI, int opNum, const MCSubtargetInfo &STI,
+ void printCCOperand(const MCInst *MI, int OpNum, const MCSubtargetInfo &STI,
raw_ostream &OS);
};
} // namespace llvm
diff --git a/llvm/lib/Target/VE/VEAsmPrinter.cpp b/llvm/lib/Target/VE/VEAsmPrinter.cpp
index 19a01f4f0749..f9204798475b 100644
--- a/llvm/lib/Target/VE/VEAsmPrinter.cpp
+++ b/llvm/lib/Target/VE/VEAsmPrinter.cpp
@@ -96,8 +96,11 @@ static void emitBSIC(MCStreamer &OutStreamer, MCOperand &R1, MCOperand &R2,
static void emitLEAzzi(MCStreamer &OutStreamer, MCOperand &Imm, MCOperand &RD,
const MCSubtargetInfo &STI) {
MCInst LEAInst;
- LEAInst.setOpcode(VE::LEAzzi);
+ LEAInst.setOpcode(VE::LEAzii);
LEAInst.addOperand(RD);
+ MCOperand CZero = MCOperand::createImm(0);
+ LEAInst.addOperand(CZero);
+ LEAInst.addOperand(CZero);
LEAInst.addOperand(Imm);
OutStreamer.emitInstruction(LEAInst, STI);
}
@@ -105,8 +108,11 @@ static void emitLEAzzi(MCStreamer &OutStreamer, MCOperand &Imm, MCOperand &RD,
static void emitLEASLzzi(MCStreamer &OutStreamer, MCOperand &Imm, MCOperand &RD,
const MCSubtargetInfo &STI) {
MCInst LEASLInst;
- LEASLInst.setOpcode(VE::LEASLzzi);
+ LEASLInst.setOpcode(VE::LEASLzii);
LEASLInst.addOperand(RD);
+ MCOperand CZero = MCOperand::createImm(0);
+ LEASLInst.addOperand(CZero);
+ LEASLInst.addOperand(CZero);
LEASLInst.addOperand(Imm);
OutStreamer.emitInstruction(LEASLInst, STI);
}
@@ -116,6 +122,8 @@ static void emitLEAzii(MCStreamer &OutStreamer, MCOperand &RS1, MCOperand &Imm,
MCInst LEAInst;
LEAInst.setOpcode(VE::LEAzii);
LEAInst.addOperand(RD);
+ MCOperand CZero = MCOperand::createImm(0);
+ LEAInst.addOperand(CZero);
LEAInst.addOperand(RS1);
LEAInst.addOperand(Imm);
OutStreamer.emitInstruction(LEAInst, STI);
@@ -126,9 +134,9 @@ static void emitLEASLrri(MCStreamer &OutStreamer, MCOperand &RS1,
const MCSubtargetInfo &STI) {
MCInst LEASLInst;
LEASLInst.setOpcode(VE::LEASLrri);
+ LEASLInst.addOperand(RD);
LEASLInst.addOperand(RS1);
LEASLInst.addOperand(RS2);
- LEASLInst.addOperand(RD);
LEASLInst.addOperand(Imm);
OutStreamer.emitInstruction(LEASLInst, STI);
}
diff --git a/llvm/lib/Target/VE/VEFrameLowering.cpp b/llvm/lib/Target/VE/VEFrameLowering.cpp
index 1305f12d7a34..3eafc8e2b988 100644
--- a/llvm/lib/Target/VE/VEFrameLowering.cpp
+++ b/llvm/lib/Target/VE/VEFrameLowering.cpp
@@ -49,20 +49,24 @@ void VEFrameLowering::emitPrologueInsns(MachineFunction &MF,
// st %plt, 32(,%sp)
// or %fp, 0, %sp
- BuildMI(MBB, MBBI, dl, TII.get(VE::STSri))
+ BuildMI(MBB, MBBI, dl, TII.get(VE::STrii))
.addReg(VE::SX11)
.addImm(0)
+ .addImm(0)
.addReg(VE::SX9);
- BuildMI(MBB, MBBI, dl, TII.get(VE::STSri))
+ BuildMI(MBB, MBBI, dl, TII.get(VE::STrii))
.addReg(VE::SX11)
+ .addImm(0)
.addImm(8)
.addReg(VE::SX10);
- BuildMI(MBB, MBBI, dl, TII.get(VE::STSri))
+ BuildMI(MBB, MBBI, dl, TII.get(VE::STrii))
.addReg(VE::SX11)
+ .addImm(0)
.addImm(24)
.addReg(VE::SX15);
- BuildMI(MBB, MBBI, dl, TII.get(VE::STSri))
+ BuildMI(MBB, MBBI, dl, TII.get(VE::STrii))
.addReg(VE::SX11)
+ .addImm(0)
.addImm(32)
.addReg(VE::SX16);
BuildMI(MBB, MBBI, dl, TII.get(VE::ORri), VE::SX9)
@@ -90,17 +94,21 @@ void VEFrameLowering::emitEpilogueInsns(MachineFunction &MF,
BuildMI(MBB, MBBI, dl, TII.get(VE::ORri), VE::SX11)
.addReg(VE::SX9)
.addImm(0);
- BuildMI(MBB, MBBI, dl, TII.get(VE::LDSri), VE::SX16)
+ BuildMI(MBB, MBBI, dl, TII.get(VE::LDrii), VE::SX16)
.addReg(VE::SX11)
+ .addImm(0)
.addImm(32);
- BuildMI(MBB, MBBI, dl, TII.get(VE::LDSri), VE::SX15)
+ BuildMI(MBB, MBBI, dl, TII.get(VE::LDrii), VE::SX15)
.addReg(VE::SX11)
+ .addImm(0)
.addImm(24);
- BuildMI(MBB, MBBI, dl, TII.get(VE::LDSri), VE::SX10)
+ BuildMI(MBB, MBBI, dl, TII.get(VE::LDrii), VE::SX10)
.addReg(VE::SX11)
+ .addImm(0)
.addImm(8);
- BuildMI(MBB, MBBI, dl, TII.get(VE::LDSri), VE::SX9)
+ BuildMI(MBB, MBBI, dl, TII.get(VE::LDrii), VE::SX9)
.addReg(VE::SX11)
+ .addImm(0)
.addImm(0);
}
@@ -124,7 +132,9 @@ void VEFrameLowering::emitSPAdjustment(MachineFunction &MF,
// lea %s13,%lo(NumBytes)
// and %s13,%s13,(32)0
// lea.sl %sp,%hi(NumBytes)(%sp, %s13)
- BuildMI(MBB, MBBI, dl, TII.get(VE::LEAzzi), VE::SX13)
+ BuildMI(MBB, MBBI, dl, TII.get(VE::LEAzii), VE::SX13)
+ .addImm(0)
+ .addImm(0)
.addImm(Lo_32(NumBytes));
BuildMI(MBB, MBBI, dl, TII.get(VE::ANDrm0), VE::SX13)
.addReg(VE::SX13)
diff --git a/llvm/lib/Target/VE/VEISelDAGToDAG.cpp b/llvm/lib/Target/VE/VEISelDAGToDAG.cpp
index c5c449b89e97..4655ca7cd553 100644
--- a/llvm/lib/Target/VE/VEISelDAGToDAG.cpp
+++ b/llvm/lib/Target/VE/VEISelDAGToDAG.cpp
@@ -44,8 +44,11 @@ class VEDAGToDAGISel : public SelectionDAGISel {
void Select(SDNode *N) override;
// Complex Pattern Selectors.
- bool SelectADDRrr(SDValue N, SDValue &R1, SDValue &R2);
- bool SelectADDRri(SDValue N, SDValue &Base, SDValue &Offset);
+ bool selectADDRrri(SDValue N, SDValue &Base, SDValue &Index, SDValue &Offset);
+ bool selectADDRrii(SDValue N, SDValue &Base, SDValue &Index, SDValue &Offset);
+ bool selectADDRzri(SDValue N, SDValue &Base, SDValue &Index, SDValue &Offset);
+ bool selectADDRzii(SDValue N, SDValue &Base, SDValue &Index, SDValue &Offset);
+ bool selectADDRri(SDValue N, SDValue &Base, SDValue &Offset);
StringRef getPassName() const override {
return "VE DAG->DAG Pattern Instruction Selection";
@@ -56,10 +59,14 @@ class VEDAGToDAGISel : public SelectionDAGISel {
private:
SDNode *getGlobalBaseReg();
+
+ bool matchADDRrr(SDValue N, SDValue &Base, SDValue &Index);
+ bool matchADDRri(SDValue N, SDValue &Base, SDValue &Offset);
};
} // end anonymous namespace
-bool VEDAGToDAGISel::SelectADDRrr(SDValue Addr, SDValue &R1, SDValue &R2) {
+bool VEDAGToDAGISel::selectADDRrri(SDValue Addr, SDValue &Base, SDValue &Index,
+ SDValue &Offset) {
if (Addr.getOpcode() == ISD::FrameIndex)
return false;
if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
@@ -67,23 +74,112 @@ bool VEDAGToDAGISel::SelectADDRrr(SDValue Addr, SDValue &R1, SDValue &R2) {
Addr.getOpcode() == ISD::TargetGlobalTLSAddress)
return false; // direct calls.
- if (Addr.getOpcode() == ISD::ADD) {
- if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
- if (isInt<13>(CN->getSExtValue()))
- return false; // Let the reg+imm pattern catch this!
- if (Addr.getOperand(0).getOpcode() == VEISD::Lo ||
- Addr.getOperand(1).getOpcode() == VEISD::Lo)
- return false; // Let the reg+imm pattern catch this!
- R1 = Addr.getOperand(0);
- R2 = Addr.getOperand(1);
+ SDValue LHS, RHS;
+ if (matchADDRri(Addr, LHS, RHS)) {
+ if (matchADDRrr(LHS, Base, Index)) {
+ Offset = RHS;
+ return true;
+ }
+ // Return false to try selectADDRrii.
+ return false;
+ }
+ if (matchADDRrr(Addr, LHS, RHS)) {
+ if (matchADDRri(RHS, Index, Offset)) {
+ Base = LHS;
+ return true;
+ }
+ if (matchADDRri(LHS, Base, Offset)) {
+ Index = RHS;
+ return true;
+ }
+ Base = LHS;
+ Index = RHS;
+ Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
+ return true;
+ }
+ return false; // Let the reg+imm(=0) pattern catch this!
+}
+
+bool VEDAGToDAGISel::selectADDRrii(SDValue Addr, SDValue &Base, SDValue &Index,
+ SDValue &Offset) {
+ if (matchADDRri(Addr, Base, Offset)) {
+ Index = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
return true;
}
- return false; // Let the reg+imm pattern catch this!
+ Base = Addr;
+ Index = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
+ Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
+ return true;
+}
+
+bool VEDAGToDAGISel::selectADDRzri(SDValue Addr, SDValue &Base, SDValue &Index,
+ SDValue &Offset) {
+ // Prefer ADDRrii.
+ return false;
+}
+
+bool VEDAGToDAGISel::selectADDRzii(SDValue Addr, SDValue &Base, SDValue &Index,
+ SDValue &Offset) {
+ if (dyn_cast<FrameIndexSDNode>(Addr)) {
+ return false;
+ }
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress ||
+ Addr.getOpcode() == ISD::TargetGlobalTLSAddress)
+ return false; // direct calls.
+
+ if (ConstantSDNode *CN = cast<ConstantSDNode>(Addr)) {
+ if (isInt<32>(CN->getSExtValue())) {
+ Base = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
+ Index = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
+ Offset =
+ CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(Addr), MVT::i32);
+ return true;
+ }
+ }
+ return false;
}
-bool VEDAGToDAGISel::SelectADDRri(SDValue Addr, SDValue &Base,
+bool VEDAGToDAGISel::selectADDRri(SDValue Addr, SDValue &Base,
SDValue &Offset) {
+ if (matchADDRri(Addr, Base, Offset))
+ return true;
+
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
+ return true;
+}
+
+bool VEDAGToDAGISel::matchADDRrr(SDValue Addr, SDValue &Base, SDValue &Index) {
+ if (dyn_cast<FrameIndexSDNode>(Addr))
+ return false;
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress ||
+ Addr.getOpcode() == ISD::TargetGlobalTLSAddress)
+ return false; // direct calls.
+
+ if (Addr.getOpcode() == ISD::ADD) {
+ ; // Nothing to do here.
+ } else if (Addr.getOpcode() == ISD::OR) {
+ // We want to look through a transform in InstCombine and DAGCombiner that
+ // turns 'add' into 'or', so we can treat this 'or' exactly like an 'add'.
+ if (!CurDAG->haveNoCommonBitsSet(Addr.getOperand(0), Addr.getOperand(1)))
+ return false;
+ } else {
+ return false;
+ }
+
+ if (Addr.getOperand(0).getOpcode() == VEISD::Lo ||
+ Addr.getOperand(1).getOpcode() == VEISD::Lo)
+ return false; // Let the LEASL patterns catch this!
+
+ Base = Addr.getOperand(0);
+ Index = Addr.getOperand(1);
+ return true;
+}
+
+bool VEDAGToDAGISel::matchADDRri(SDValue Addr, SDValue &Base, SDValue &Offset) {
auto AddrTy = Addr->getValueType(0);
if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
Base = CurDAG->getTargetFrameIndex(FIN->getIndex(), AddrTy);
@@ -97,7 +193,7 @@ bool VEDAGToDAGISel::SelectADDRri(SDValue Addr, SDValue &Base,
if (CurDAG->isBaseWithConstantOffset(Addr)) {
ConstantSDNode *CN = cast<ConstantSDNode>(Addr.getOperand(1));
- if (isInt<13>(CN->getSExtValue())) {
+ if (isInt<32>(CN->getSExtValue())) {
if (FrameIndexSDNode *FIN =
dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
// Constant offset from frame ref.
@@ -110,9 +206,7 @@ bool VEDAGToDAGISel::SelectADDRri(SDValue Addr, SDValue &Base,
return true;
}
}
- Base = Addr;
- Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
- return true;
+ return false;
}
void VEDAGToDAGISel::Select(SDNode *N) {
diff --git a/llvm/lib/Target/VE/VEInstrInfo.cpp b/llvm/lib/Target/VE/VEInstrInfo.cpp
index cc24f2e29c59..ac60d62e0159 100644
--- a/llvm/lib/Target/VE/VEInstrInfo.cpp
+++ b/llvm/lib/Target/VE/VEInstrInfo.cpp
@@ -303,10 +303,13 @@ void VEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
/// any side effects other than loading from the stack slot.
unsigned VEInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
- if (MI.getOpcode() == VE::LDSri || MI.getOpcode() == VE::LDLri ||
- MI.getOpcode() == VE::LDUri) {
+ if (MI.getOpcode() == VE::LDrii || // I64
+ MI.getOpcode() == VE::LDLSXrii || // I32
+ MI.getOpcode() == VE::LDUrii // F32
+ ) {
if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
- MI.getOperand(2).getImm() == 0) {
+ MI.getOperand(2).getImm() == 0 && MI.getOperand(3).isImm() &&
+ MI.getOperand(3).getImm() == 0) {
FrameIndex = MI.getOperand(1).getIndex();
return MI.getOperand(0).getReg();
}
@@ -321,12 +324,15 @@ unsigned VEInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
/// any side effects other than storing to the stack slot.
unsigned VEInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
int &FrameIndex) const {
- if (MI.getOpcode() == VE::STSri || MI.getOpcode() == VE::STLri ||
- MI.getOpcode() == VE::STUri) {
+ if (MI.getOpcode() == VE::STrii || // I64
+ MI.getOpcode() == VE::STLrii || // I32
+ MI.getOpcode() == VE::STUrii // F32
+ ) {
if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
- MI.getOperand(1).getImm() == 0) {
+ MI.getOperand(1).getImm() == 0 && MI.getOperand(2).isImm() &&
+ MI.getOperand(2).getImm() == 0) {
FrameIndex = MI.getOperand(0).getIndex();
- return MI.getOperand(2).getReg();
+ return MI.getOperand(3).getReg();
}
}
return 0;
@@ -349,21 +355,24 @@ void VEInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
// On the order of operands here: think "[FrameIdx + 0] = SrcReg".
if (RC == &VE::I64RegClass) {
- BuildMI(MBB, I, DL, get(VE::STSri))
+ BuildMI(MBB, I, DL, get(VE::STrii))
.addFrameIndex(FI)
.addImm(0)
+ .addImm(0)
.addReg(SrcReg, getKillRegState(isKill))
.addMemOperand(MMO);
} else if (RC == &VE::I32RegClass) {
- BuildMI(MBB, I, DL, get(VE::STLri))
+ BuildMI(MBB, I, DL, get(VE::STLrii))
.addFrameIndex(FI)
.addImm(0)
+ .addImm(0)
.addReg(SrcReg, getKillRegState(isKill))
.addMemOperand(MMO);
} else if (RC == &VE::F32RegClass) {
- BuildMI(MBB, I, DL, get(VE::STUri))
+ BuildMI(MBB, I, DL, get(VE::STUrii))
.addFrameIndex(FI)
.addImm(0)
+ .addImm(0)
.addReg(SrcReg, getKillRegState(isKill))
.addMemOperand(MMO);
} else
@@ -386,19 +395,22 @@ void VEInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
if (RC == &VE::I64RegClass) {
- BuildMI(MBB, I, DL, get(VE::LDSri), DestReg)
+ BuildMI(MBB, I, DL, get(VE::LDrii), DestReg)
.addFrameIndex(FI)
.addImm(0)
+ .addImm(0)
.addMemOperand(MMO);
} else if (RC == &VE::I32RegClass) {
- BuildMI(MBB, I, DL, get(VE::LDLri), DestReg)
+ BuildMI(MBB, I, DL, get(VE::LDLSXrii), DestReg)
.addFrameIndex(FI)
.addImm(0)
+ .addImm(0)
.addMemOperand(MMO);
} else if (RC == &VE::F32RegClass) {
- BuildMI(MBB, I, DL, get(VE::LDUri), DestReg)
+ BuildMI(MBB, I, DL, get(VE::LDUrii), DestReg)
.addFrameIndex(FI)
.addImm(0)
+ .addImm(0)
.addMemOperand(MMO);
} else
report_fatal_error("Can't load this register from stack slot");
@@ -487,13 +499,16 @@ bool VEInstrInfo::expandExtendStackPseudo(MachineInstr &MI) const {
// Update machine-CFG edges
BB->addSuccessor(sinkMBB);
- BuildMI(BB, dl, TII.get(VE::LDSri), VE::SX61)
+ BuildMI(BB, dl, TII.get(VE::LDrii), VE::SX61)
.addReg(VE::SX14)
+ .addImm(0)
.addImm(0x18);
BuildMI(BB, dl, TII.get(VE::ORri), VE::SX62)
.addReg(VE::SX0)
.addImm(0);
- BuildMI(BB, dl, TII.get(VE::LEAzzi), VE::SX63)
+ BuildMI(BB, dl, TII.get(VE::LEAzii), VE::SX63)
+ .addImm(0)
+ .addImm(0)
.addImm(0x13b);
BuildMI(BB, dl, TII.get(VE::SHMri))
.addReg(VE::SX61)
diff --git a/llvm/lib/Target/VE/VEInstrInfo.td b/llvm/lib/Target/VE/VEInstrInfo.td
index 38dfb28cae1b..a62cc92df134 100644
--- a/llvm/lib/Target/VE/VEInstrInfo.td
+++ b/llvm/lib/Target/VE/VEInstrInfo.td
@@ -24,6 +24,10 @@ include "VEInstrFormats.td"
// Instruction Pattern Stuff
//===----------------------------------------------------------------------===//
+def LO7 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(SignExtend32(N->getSExtValue(), 7),
+ SDLoc(N), MVT::i32);
+}]>;
def simm7 : PatLeaf<(imm), [{ return isInt<7>(N->getSExtValue()); }]>;
def simm32 : PatLeaf<(imm), [{ return isInt<32>(N->getSExtValue()); }]>;
def uimm32 : PatLeaf<(imm), [{ return isUInt<32>(N->getZExtValue()); }]>;
@@ -131,17 +135,10 @@ def fcond2cc : SDNodeXForm<cond, [{
}]>;
// Addressing modes.
-def ADDRrr : ComplexPattern<iPTR, 2, "SelectADDRrr", [], []>;
-def ADDRri : ComplexPattern<iPTR, 2, "SelectADDRri", [frameindex], []>;
-
-// ASX format of memory address
-def MEMrr : Operand<iPTR> {
- let PrintMethod = "printMemASXOperand";
- let MIOperandInfo = (ops ptr_rc, ptr_rc);
-}
+def ADDRri : ComplexPattern<iPTR, 2, "selectADDRri", [frameindex], []>;
def MEMri : Operand<iPTR> {
- let PrintMethod = "printMemASXOperand";
+ let PrintMethod = "printMemASOperandASX";
let MIOperandInfo = (ops ptr_rc, i64imm);
}
@@ -151,6 +148,74 @@ def MEMASri : Operand<iPTR> {
let MIOperandInfo = (ops ptr_rc, i64imm);
}
+// Addressing modes.
+// SX-Aurora has the following fields.
+// sz: register or 0
+// sy: register or immediate (-64 to 63)
+// disp: immediate (-2147483648 to 2147483647)
+//
+// There are two kinds of instruction.
+// ASX format uses sz + sy + disp.
+// AS format uses sz + disp.
+//
+// Moreover, there are four kinds of assembly instruction format.
+// ASX format uses "disp", "disp(, sz)", "disp(sy)", "disp(sy, sz)",
+// "(, sz)", "(sy)", or "(sy, sz)".
+// AS format uses "disp", "disp(, sz)", or "(, sz)" in general.
+// AS format in RRM format uses "disp", "disp(sz)", or "(sz)".
+// AS format in RRM format for host memory access uses "sz", "(sz)",
+// or "disp(sz)".
+//
+// We defined them below.
+//
+// ASX format:
+// MEMrri, MEMrii, MEMzri, MEMzii
+// AS format:
+// will be added later.
+
+def ADDRrri : ComplexPattern<iPTR, 3, "selectADDRrri", [frameindex], []>;
+def ADDRrii : ComplexPattern<iPTR, 3, "selectADDRrii", [frameindex], []>;
+def ADDRzri : ComplexPattern<iPTR, 3, "selectADDRzri", [], []>;
+def ADDRzii : ComplexPattern<iPTR, 3, "selectADDRzii", [], []>;
+//
+// ASX assembly instruction format:
+def VEMEMrriAsmOperand : AsmOperandClass {
+ let Name = "MEMrri";
+ let ParserMethod = "parseMEMOperand";
+}
+def VEMEMriiAsmOperand : AsmOperandClass {
+ let Name = "MEMrii";
+ let ParserMethod = "parseMEMOperand";
+}
+def VEMEMzriAsmOperand : AsmOperandClass {
+ let Name = "MEMzri";
+ let ParserMethod = "parseMEMOperand";
+}
+def VEMEMziiAsmOperand : AsmOperandClass {
+ let Name = "MEMzii";
+ let ParserMethod = "parseMEMOperand";
+}
+def MEMrri : Operand<iPTR> {
+ let PrintMethod = "printMemASXOperand";
+ let MIOperandInfo = (ops ptr_rc, ptr_rc, i32imm);
+ let ParserMatchClass = VEMEMrriAsmOperand;
+}
+def MEMrii : Operand<iPTR> {
+ let PrintMethod = "printMemASXOperand";
+ let MIOperandInfo = (ops ptr_rc, i32imm, i32imm);
+ let ParserMatchClass = VEMEMriiAsmOperand;
+}
+def MEMzri : Operand<iPTR> {
+ let PrintMethod = "printMemASXOperand";
+ let MIOperandInfo = (ops i32imm /* = 0 */, ptr_rc, i32imm);
+ let ParserMatchClass = VEMEMzriAsmOperand;
+}
+def MEMzii : Operand<iPTR> {
+ let PrintMethod = "printMemASXOperand";
+ let MIOperandInfo = (ops i32imm /* = 0 */, i32imm, i32imm);
+ let ParserMatchClass = VEMEMziiAsmOperand;
+}
+
// Branch targets have OtherVT type.
def brtarget32 : Operand<OtherVT> {
let EncoderMethod = "getBranchTarget32OpValue";
@@ -257,48 +322,6 @@ def CC_AT : CC_VAL<21>; // Always true
// VE Multiclasses for common instruction formats
//===----------------------------------------------------------------------===//
-multiclass RMm<string opcStr, bits<8>opc,
- RegisterClass RC, ValueType Ty,
- Operand immOp, Operand immOp2,
- SDPatternOperator OpNode=null_frag> {
- def rri : RM<
- opc, (outs RC:$sx), (ins RC:$sy, RC:$sz, immOp2:$imm32),
- !strconcat(opcStr, " $sx, ${imm32}($sy, ${sz})")> {
- let cy = 1;
- let cz = 1;
- let hasSideEffects = 0;
- }
- def rzi : RM<
- opc, (outs RC:$sx), (ins RC:$sz, immOp2:$imm32),
- !strconcat(opcStr, " $sx, ${imm32}(${sz})"),
- [(set Ty:$sx, (OpNode Ty:$sz, (Ty simm32:$imm32)))]> {
- let cy = 0;
- let sy = 0;
- let cz = 1;
- let hasSideEffects = 0;
- }
- def zii : RM<
- opc, (outs RC:$sx), (ins immOp:$sy, immOp2:$imm32),
- !strconcat(opcStr, " $sx, ${imm32}(${sy})"),
- [/* Not define DAG pattern here to avoid llvm uses LEAzii for all add
- instructions.
- (set Ty:$sx, (OpNode (Ty simm7:$sy), (Ty simm32:$imm32))) */]> {
- let cy = 0;
- let cz = 0;
- let sz = 0;
- let hasSideEffects = 0;
- }
- def zzi : RM<
- opc, (outs RC:$sx), (ins immOp2:$imm32),
- !strconcat(opcStr, " $sx, $imm32")> {
- let cy = 0;
- let sy = 0;
- let cz = 0;
- let sz = 0;
- let hasSideEffects = 0;
- }
-}
-
multiclass RRmrr<string opcStr, bits<8>opc,
RegisterClass RCo, ValueType Tyo,
RegisterClass RCi, ValueType Tyi,
@@ -621,6 +644,149 @@ multiclass CVTm<string opcStr, bits<8> opc,
// Instructions
//===----------------------------------------------------------------------===//
+//-----------------------------------------------------------------------------
+// Section 8.2 - Load/Store instructions
+//-----------------------------------------------------------------------------
+
+// Multiclass for generic RM instructions
+multiclass RMm<string opcStr, bits<8>opc, RegisterClass RC> {
+ def rri : RM<opc, (outs RC:$dest), (ins MEMrri:$addr),
+ !strconcat(opcStr, " $dest, $addr"), []>;
+ let cy = 0 in
+ def rii : RM<opc, (outs RC:$dest), (ins MEMrii:$addr),
+ !strconcat(opcStr, " $dest, $addr"), []>;
+ let cz = 0 in
+ def zri : RM<opc, (outs RC:$dest), (ins MEMzri:$addr),
+ !strconcat(opcStr, " $dest, $addr"), []>;
+ let cy = 0, cz = 0 in
+ def zii : RM<opc, (outs RC:$dest), (ins MEMzii:$addr),
+ !strconcat(opcStr, " $dest, $addr"), []>;
+}
+
+// Section 8.2.1 - LEA
+let cx = 0, DecoderMethod = "DecodeLoadI64" in
+defm LEA : RMm<"lea", 0x06, I64>;
+let cx = 1, DecoderMethod = "DecodeLoadI64" in
+defm LEASL : RMm<"lea.sl", 0x06, I64>;
+let cx = 0, DecoderMethod = "DecodeLoadI32", isCodeGenOnly = 1 in
+defm LEA32 : RMm<"lea", 0x06, I32>;
+
+def : Pat<(iPTR ADDRrri:$addr), (LEArri MEMrri:$addr)>;
+def : Pat<(iPTR ADDRrii:$addr), (LEArii MEMrii:$addr)>;
+def : Pat<(add I64:$base, simm32:$disp), (LEArii $base, 0, (LO32 $disp))>;
+def : Pat<(add I64:$base, lozero:$disp), (LEASLrii $base, 0, (HI32 $disp))>;
+def : Pat<(add I32:$base, simm32:$disp),
+ (LEA32rii (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $base, sub_i32), 0,
+ (LO32 $disp))>;
+
+def lea_add : PatFrags<(ops node:$base, node:$idx, node:$disp),
+ [(add (add node:$base, node:$idx), node:$disp),
+ (add (add node:$base, node:$disp), node:$idx)]>;
+def : Pat<(lea_add I64:$base, simm7:$idx, simm32:$disp),
+ (LEArii $base, (LO7 $idx), (LO32 $disp))>;
+def : Pat<(lea_add I64:$base, I64:$idx, simm32:$disp),
+ (LEArri $base, $idx, (LO32 $disp))>;
+def : Pat<(lea_add I64:$base, simm7:$idx, lozero:$disp),
+ (LEASLrii $base, (LO7 $idx), (HI32 $disp))>;
+def : Pat<(lea_add I64:$base, I64:$idx, lozero:$disp),
+ (LEASLrri $base, $idx, (HI32 $disp))>;
+
+// Multiclass for load instructions.
+let mayLoad = 1, hasSideEffects = 0 in
+multiclass LOADm<string opcStr, bits<8> opc, RegisterClass RC, ValueType Ty,
+ SDPatternOperator OpNode = null_frag> {
+ def rri : RM<opc, (outs RC:$dest), (ins MEMrri:$addr),
+ !strconcat(opcStr, " $dest, $addr"),
+ [(set Ty:$dest, (OpNode ADDRrri:$addr))]>;
+ let cy = 0 in
+ def rii : RM<opc, (outs RC:$dest), (ins MEMrii:$addr),
+ !strconcat(opcStr, " $dest, $addr"),
+ [(set Ty:$dest, (OpNode ADDRrii:$addr))]>;
+ let cz = 0 in
+ def zri : RM<opc, (outs RC:$dest), (ins MEMzri:$addr),
+ !strconcat(opcStr, " $dest, $addr"),
+ [(set Ty:$dest, (OpNode ADDRzri:$addr))]>;
+ let cy = 0, cz = 0 in
+ def zii : RM<opc, (outs RC:$dest), (ins MEMzii:$addr),
+ !strconcat(opcStr, " $dest, $addr"),
+ [(set Ty:$dest, (OpNode ADDRzii:$addr))]>;
+}
+
+// Section 8.2.2 - LDS
+let DecoderMethod = "DecodeLoadI64" in
+defm LD : LOADm<"ld", 0x01, I64, i64, load>;
+def : Pat<(f64 (load ADDRrri:$addr)), (LDrri MEMrri:$addr)>;
+def : Pat<(f64 (load ADDRrii:$addr)), (LDrii MEMrii:$addr)>;
+def : Pat<(f64 (load ADDRzri:$addr)), (LDzri MEMzri:$addr)>;
+def : Pat<(f64 (load ADDRzii:$addr)), (LDzii MEMzii:$addr)>;
+
+// Section 8.2.3 - LDU
+let DecoderMethod = "DecodeLoadF32" in
+defm LDU : LOADm<"ldu", 0x02, F32, f32, load>;
+
+// Section 8.2.4 - LDL
+let DecoderMethod = "DecodeLoadI32" in
+defm LDLSX : LOADm<"ldl.sx", 0x03, I32, i32, load>;
+let cx = 1, DecoderMethod = "DecodeLoadI32" in
+defm LDLZX : LOADm<"ldl.zx", 0x03, I32, i32, load>;
+
+// Section 8.2.5 - LD2B
+let DecoderMethod = "DecodeLoadI16" in
+defm LD2BSX : LOADm<"ld2b.sx", 0x04, I32, i32, sextloadi16>;
+let cx = 1, DecoderMethod = "DecodeLoadI16" in
+defm LD2BZX : LOADm<"ld2b.zx", 0x04, I32, i32, zextloadi16>;
+
+// Section 8.2.6 - LD1B
+let DecoderMethod = "DecodeLoadI8" in
+defm LD1BSX : LOADm<"ld1b.sx", 0x05, I32, i32, sextloadi8>;
+let cx = 1, DecoderMethod = "DecodeLoadI8" in
+defm LD1BZX : LOADm<"ld1b.zx", 0x05, I32, i32, zextloadi8>;
+
+// Multiclass for store instructions.
+let mayStore = 1 in
+multiclass STOREm<string opcStr, bits<8> opc, RegisterClass RC, ValueType Ty,
+ SDPatternOperator OpNode = null_frag> {
+ def rri : RM<opc, (outs), (ins MEMrri:$addr, RC:$sx),
+ !strconcat(opcStr, " $sx, $addr"),
+ [(OpNode Ty:$sx, ADDRrri:$addr)]>;
+ let cy = 0 in
+ def rii : RM<opc, (outs), (ins MEMrii:$addr, RC:$sx),
+ !strconcat(opcStr, " $sx, $addr"),
+ [(OpNode Ty:$sx, ADDRrii:$addr)]>;
+ let cz = 0 in
+ def zri : RM<opc, (outs), (ins MEMzri:$addr, RC:$sx),
+ !strconcat(opcStr, " $sx, $addr"),
+ [(OpNode Ty:$sx, ADDRzri:$addr)]>;
+ let cy = 0, cz = 0 in
+ def zii : RM<opc, (outs), (ins MEMzii:$addr, RC:$sx),
+ !strconcat(opcStr, " $sx, $addr"),
+ [(OpNode Ty:$sx, ADDRzii:$addr)]>;
+}
+
+// Section 8.2.7 - STS
+let DecoderMethod = "DecodeStoreI64" in
+defm ST : STOREm<"st", 0x11, I64, i64, store>;
+def : Pat<(store f64:$src, ADDRrri:$addr), (STrri MEMrri:$addr, $src)>;
+def : Pat<(store f64:$src, ADDRrii:$addr), (STrii MEMrii:$addr, $src)>;
+def : Pat<(store f64:$src, ADDRzri:$addr), (STzri MEMzri:$addr, $src)>;
+def : Pat<(store f64:$src, ADDRzii:$addr), (STzii MEMzii:$addr, $src)>;
+
+// Section 8.2.8 - STU
+let DecoderMethod = "DecodeStoreF32" in
+defm STU : STOREm<"stu", 0x12, F32, f32, store>;
+
+// Section 8.2.9 - STL
+let DecoderMethod = "DecodeStoreI32" in
+defm STL : STOREm<"stl", 0x13, I32, i32, store>;
+
+// Section 8.2.10 - ST2B
+let DecoderMethod = "DecodeStoreI16" in
+defm ST2B : STOREm<"st2b", 0x14, I32, i32, truncstorei16>;
+
+// Section 8.2.11 - ST1B
+let DecoderMethod = "DecodeStoreI8" in
+defm ST1B : STOREm<"st1b", 0x15, I32, i32, truncstorei8>;
+
// CMOV instructions
let cx = 0, cw = 0, cw2 = 0 in
defm CMOVL : RRCMOVm<"cmov.l.${cf}", 0x3B, I64, i64, simm7Op64, uimm6Op64>;
@@ -635,22 +801,6 @@ let cx = 0, cw = 1, cw2 = 1 in
defm CMOVS : RRCMOVm<"cmov.s.${cf}", 0x3B, F32, f32, simm7Op64, uimm6Op32>;
-// LEA and LEASL instruction (load 32 bit imm to low or high part)
-let cx = 0 in
-defm LEA : RMm<"lea", 0x06, I64, i64, simm7Op64, simm32Op64, add>;
-let cx = 1 in
-defm LEASL : RMm<"lea.sl", 0x06, I64, i64, simm7Op64, simm32Op64>;
-let isCodeGenOnly = 1 in {
-let cx = 0 in
-defm LEA32 : RMm<"lea", 0x06, I32, i32, simm7Op32, simm32Op32, add>;
-}
-
-let cx = 0, cy = 1, cz = 0, sz = 0, hasSideEffects = 0 in {
- def LEAasx : RM<
- 0x06, (outs I64:$sx), (ins MEMri:$addr),
- "lea $sx,$addr", [(set iPTR:$sx, ADDRri:$addr)]>;
-}
-
// 5.3.2.2. Fixed-Point Arithmetic Operation Instructions
// ADD instruction
@@ -856,81 +1006,6 @@ let cz = 0, sz = 0 in {
defm CVD : CVTm<"cvt.d.s", 0x0F, I64, f64, F32, f32, simm7Op32, fpextend>;
}
-// Load and Store instructions
-// As 1st step, only uses sz and imm32 to represent $addr
-let mayLoad = 1, hasSideEffects = 0 in {
-let cy = 0, sy = 0, cz = 1 in {
-let cx = 0 in
-def LDSri : RM<
- 0x01, (outs I64:$sx), (ins MEMri:$addr),
- "ld $sx, $addr",
- [(set i64:$sx, (load ADDRri:$addr))]>;
-let cx = 0 in
-def LDUri : RM<
- 0x02, (outs F32:$sx), (ins MEMri:$addr),
- "ldu $sx, $addr",
- [(set f32:$sx, (load ADDRri:$addr))]>;
-let cx = 0 in
-def LDLri : RM<
- 0x03, (outs I32:$sx), (ins MEMri:$addr),
- "ldl.sx $sx, $addr",
- [(set i32:$sx, (load ADDRri:$addr))]>;
-let cx = 1 in
-def LDLUri : RM<
- 0x03, (outs I32:$sx), (ins MEMri:$addr),
- "ldl.zx $sx, $addr",
- [(set i32:$sx, (load ADDRri:$addr))]>;
-let cx = 0 in
-def LD2Bri : RM<
- 0x04, (outs I32:$sx), (ins MEMri:$addr),
- "ld2b.sx $sx, $addr",
- [(set i32:$sx, (sextloadi16 ADDRri:$addr))]>;
-let cx = 1 in
-def LD2BUri : RM<
- 0x04, (outs I32:$sx), (ins MEMri:$addr),
- "ld2b.zx $sx, $addr",
- [(set i32:$sx, (zextloadi16 ADDRri:$addr))]>;
-let cx = 0 in
-def LD1Bri : RM<
- 0x05, (outs I32:$sx), (ins MEMri:$addr),
- "ld1b.sx $sx, $addr",
- [(set i32:$sx, (sextloadi8 ADDRri:$addr))]>;
-let cx = 1 in
-def LD1BUri : RM<
- 0x05, (outs I32:$sx), (ins MEMri:$addr),
- "ld1b.zx $sx, $addr",
- [(set i32:$sx, (zextloadi8 ADDRri:$addr))]>;
-}
-}
-
-let mayStore = 1, hasSideEffects = 0 in {
-let cx = 0, cy = 0, sy = 0, cz = 1 in {
-def STSri : RM<
- 0x11, (outs), (ins MEMri:$addr, I64:$sx),
- "st $sx, $addr",
- [(store i64:$sx, ADDRri:$addr)]>;
-def STUri : RM<
- 0x12, (outs), (ins MEMri:$addr, F32:$sx),
- "stu $sx, $addr",
- [(store f32:$sx, ADDRri:$addr)]>;
-def STLri : RM<
- 0x13, (outs), (ins MEMri:$addr, I32:$sx),
- "stl $sx, $addr",
- [(store i32:$sx, ADDRri:$addr)]>;
-def ST2Bri : RM<
- 0x14, (outs), (ins MEMri:$addr, I32:$sx),
- "st2b $sx, $addr",
- [(truncstorei16 i32:$sx, ADDRri:$addr)]>;
-def ST1Bri : RM<
- 0x15, (outs), (ins MEMri:$addr, I32:$sx),
- "st1b $sx, $addr",
- [(truncstorei8 i32:$sx, ADDRri:$addr)]>;
-}
-}
-
-def : Pat<(f64 (load ADDRri:$addr)), (LDSri ADDRri:$addr)>;
-def : Pat<(store f64:$sx, ADDRri:$addr), (STSri ADDRri:$addr, $sx)>;
-
// Control-flow
// Jump instruction
@@ -946,10 +1021,6 @@ let cx = 0, cx2 = 0, bpf = 0 /* NONE */, cf = 15 /* AT */, cy = 0, sy = 0,
cz = 1,
isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1,
hasDelaySlot = 1, isCodeGenOnly = 1, hasSideEffects = 0 in {
-def BArr : CF<
- 0x19, (outs), (ins MEMrr:$addr),
- "b.l $addr",
- [(brind ADDRrr:$addr)]>;
def BAri : CF<
0x19, (outs), (ins MEMri:$addr),
"b.l $addr",
@@ -1035,31 +1106,31 @@ def CALLr : RM<
def : Pat<(i32 simm7:$val), (OR32im1 imm:$val, 0)>;
def : Pat<(i64 simm7:$val), (ORim1 imm:$val, 0)>;
// Medium immediates.
-def : Pat<(i32 simm32:$val), (LEA32zzi imm:$val)>;
-def : Pat<(i64 simm32:$val), (LEAzzi imm:$val)>;
-def : Pat<(i64 uimm32:$val), (ANDrm0 (LEAzzi imm:$val), 32)>;
+def : Pat<(i32 simm32:$val), (LEA32zii 0, 0, (LO32 $val))>;
+def : Pat<(i64 simm32:$val), (LEAzii 0, 0, (LO32 $val))>;
+def : Pat<(i64 uimm32:$val), (ANDrm0 (LEAzii 0, 0, (LO32 $val)), 32)>;
// Arbitrary immediates.
def : Pat<(i64 lozero:$val),
- (LEASLzzi (HI32 imm:$val))>;
+ (LEASLzii 0, 0, (HI32 imm:$val))>;
def : Pat<(i64 lomsbzero:$val),
- (LEASLrzi (LEAzzi (LO32 imm:$val)), (HI32 imm:$val))>;
+ (LEASLrii (LEAzii 0, 0, (LO32 imm:$val)), 0, (HI32 imm:$val))>;
def : Pat<(i64 imm:$val),
- (LEASLrzi (ANDrm0 (LEAzzi (LO32 imm:$val)), 32),
+ (LEASLrii (ANDrm0 (LEAzii 0, 0, (LO32 imm:$val)), 32), 0,
(HI32 imm:$val))>;
// floating point
def : Pat<(f32 fpimm:$val),
- (COPY_TO_REGCLASS (LEASLzzi (LOFP32 $val)), F32)>;
+ (COPY_TO_REGCLASS (LEASLzii 0, 0, (LOFP32 $val)), F32)>;
def : Pat<(f64 fplozero:$val),
- (LEASLzzi (HIFP32 $val))>;
+ (LEASLzii 0, 0, (HIFP32 $val))>;
def : Pat<(f64 fplomsbzero:$val),
- (LEASLrzi (LEAzzi (LOFP32 $val)), (HIFP32 $val))>;
+ (LEASLrii (LEAzii 0, 0, (LOFP32 $val)), 0, (HIFP32 $val))>;
def : Pat<(f64 fpimm:$val),
- (LEASLrzi (ANDrm0 (LEAzzi (LOFP32 $val)), 32),
+ (LEASLrii (ANDrm0 (LEAzii 0, 0, (LOFP32 $val)), 32), 0,
(HIFP32 $val))>;
// The same integer registers are used for i32 and i64 values.
-// When registers hold i32 values, the high bits are unused.
+// When registers hold i32 values, the high bits are unused.
// TODO Use standard expansion for shift-based lowering of sext_inreg
@@ -1111,56 +1182,88 @@ def : Pat<(i64 (anyext i32:$sy)),
// extload, sextload and zextload stuff
-def : Pat<(i64 (sextloadi8 ADDRri:$addr)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD1Bri MEMri:$addr), sub_i32)>;
-def : Pat<(i64 (zextloadi8 ADDRri:$addr)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD1BUri MEMri:$addr), sub_i32)>;
-def : Pat<(i64 (sextloadi16 ADDRri:$addr)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD2Bri MEMri:$addr), sub_i32)>;
-def : Pat<(i64 (zextloadi16 ADDRri:$addr)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD2BUri MEMri:$addr), sub_i32)>;
-def : Pat<(i64 (sextloadi32 ADDRri:$addr)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLri MEMri:$addr), sub_i32)>;
-def : Pat<(i64 (zextloadi32 ADDRri:$addr)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLUri MEMri:$addr), sub_i32)>;
-def : Pat<(i64 (extloadi8 ADDRri:$addr)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD1BUri MEMri:$addr), sub_i32)>;
-def : Pat<(i64 (extloadi16 ADDRri:$addr)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LD2BUri MEMri:$addr), sub_i32)>;
-def : Pat<(i64 (extloadi32 ADDRri:$addr)),
- (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (LDLri MEMri:$addr), sub_i32)>;
+multiclass EXT64m<SDPatternOperator from,
+ SDPatternOperator torri,
+ SDPatternOperator torii,
+ SDPatternOperator tozri,
+ SDPatternOperator tozii> {
+ def : Pat<(i64 (from ADDRrri:$addr)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (torri MEMrri:$addr),
+ sub_i32)>;
+ def : Pat<(i64 (from ADDRrii:$addr)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (torii MEMrii:$addr),
+ sub_i32)>;
+ def : Pat<(i64 (from ADDRzri:$addr)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (tozri MEMzri:$addr),
+ sub_i32)>;
+ def : Pat<(i64 (from ADDRzii:$addr)),
+ (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (tozii MEMzii:$addr),
+ sub_i32)>;
+}
+defm : EXT64m<sextloadi8, LD1BSXrri, LD1BSXrii, LD1BSXzri, LD1BSXzii>;
+defm : EXT64m<zextloadi8, LD1BZXrri, LD1BZXrii, LD1BZXzri, LD1BZXzii>;
+defm : EXT64m<extloadi8, LD1BZXrri, LD1BZXrii, LD1BZXzri, LD1BZXzii>;
+defm : EXT64m<sextloadi16, LD2BSXrri, LD2BSXrii, LD2BSXzri, LD2BSXzii>;
+defm : EXT64m<zextloadi16, LD2BZXrri, LD2BZXrii, LD2BZXzri, LD2BZXzii>;
+defm : EXT64m<extloadi16, LD2BZXrri, LD2BZXrii, LD2BZXzri, LD2BZXzii>;
+defm : EXT64m<sextloadi32, LDLSXrri, LDLSXrii, LDLSXzri, LDLSXzii>;
+defm : EXT64m<zextloadi32, LDLZXrri, LDLZXrii, LDLZXzri, LDLZXzii>;
+defm : EXT64m<extloadi32, LDLSXrri, LDLSXrii, LDLSXzri, LDLSXzii>;
// anyextload
-def : Pat<(extloadi8 ADDRri:$addr), (LD1BUri MEMri:$addr)>;
-def : Pat<(extloadi16 ADDRri:$addr), (LD2BUri MEMri:$addr)>;
+multiclass EXT32m<SDPatternOperator from,
+ SDPatternOperator torri,
+ SDPatternOperator torii,
+ SDPatternOperator tozri,
+ SDPatternOperator tozii> {
+ def : Pat<(from ADDRrri:$addr), (torri MEMrri:$addr)>;
+ def : Pat<(from ADDRrii:$addr), (torii MEMrii:$addr)>;
+ def : Pat<(from ADDRzri:$addr), (tozri MEMzri:$addr)>;
+ def : Pat<(from ADDRzii:$addr), (tozii MEMzii:$addr)>;
+}
+defm : EXT32m<extloadi8, LD1BZXrri, LD1BZXrii, LD1BZXzri, LD1BZXzii>;
+defm : EXT32m<extloadi16, LD2BZXrri, LD2BZXrii, LD2BZXzri, LD2BZXzii>;
// truncstore
-def : Pat<(truncstorei8 i64:$src, ADDRri:$addr),
- (ST1Bri MEMri:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
-def : Pat<(truncstorei16 i64:$src, ADDRri:$addr),
- (ST2Bri MEMri:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
-def : Pat<(truncstorei32 i64:$src, ADDRri:$addr),
- (STLri MEMri:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
+multiclass TRUNC64m<SDPatternOperator from,
+ SDPatternOperator torri,
+ SDPatternOperator torii,
+ SDPatternOperator tozri,
+ SDPatternOperator tozii> {
+ def : Pat<(from i64:$src, ADDRrri:$addr),
+ (torri MEMrri:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
+ def : Pat<(from i64:$src, ADDRrii:$addr),
+ (torii MEMrii:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
+ def : Pat<(from i64:$src, ADDRzri:$addr),
+ (tozri MEMzri:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
+ def : Pat<(from i64:$src, ADDRzii:$addr),
+ (tozii MEMzii:$addr, (EXTRACT_SUBREG $src, sub_i32))>;
+}
+defm : TRUNC64m<truncstorei8, ST1Brri, ST1Brii, ST1Bzri, ST1Bzii>;
+defm : TRUNC64m<truncstorei16, ST2Brri, ST2Brii, ST2Bzri, ST2Bzii>;
+defm : TRUNC64m<truncstorei32, STLrri, STLrii, STLzri, ST1Bzii>;
// Address calculation and its optimization
-def : Pat<(VEhi tglobaladdr:$in), (LEASLzzi tglobaladdr:$in)>;
-def : Pat<(VElo tglobaladdr:$in), (ANDrm0 (LEAzzi tglobaladdr:$in), 32)>;
+def : Pat<(VEhi tglobaladdr:$in), (LEASLzii 0, 0, tglobaladdr:$in)>;
+def : Pat<(VElo tglobaladdr:$in), (ANDrm0 (LEAzii 0, 0, tglobaladdr:$in), 32)>;
def : Pat<(add (VEhi tglobaladdr:$in1), (VElo tglobaladdr:$in2)),
- (LEASLrzi (ANDrm0 (LEAzzi tglobaladdr:$in2), 32),
+ (LEASLrii (ANDrm0 (LEAzii 0, 0, tglobaladdr:$in2), 32), 0,
(tglobaladdr:$in1))>;
// GlobalTLS address calculation and its optimization
-def : Pat<(VEhi tglobaltlsaddr:$in), (LEASLzzi tglobaltlsaddr:$in)>;
-def : Pat<(VElo tglobaltlsaddr:$in), (ANDrm0 (LEAzzi tglobaltlsaddr:$in), 32)>;
+def : Pat<(VEhi tglobaltlsaddr:$in), (LEASLzii 0, 0, tglobaltlsaddr:$in)>;
+def : Pat<(VElo tglobaltlsaddr:$in),
+ (ANDrm0 (LEAzii 0, 0, tglobaltlsaddr:$in), 32)>;
def : Pat<(add (VEhi tglobaltlsaddr:$in1), (VElo tglobaltlsaddr:$in2)),
- (LEASLrzi (ANDrm0 (LEAzzi tglobaltlsaddr:$in2), 32),
+ (LEASLrii (ANDrm0 (LEAzii 0, 0, tglobaltlsaddr:$in2), 32), 0,
(tglobaltlsaddr:$in1))>;
// Address calculation and its optimization
-def : Pat<(VEhi texternalsym:$in), (LEASLzzi texternalsym:$in)>;
-def : Pat<(VElo texternalsym:$in), (ANDrm0 (LEAzzi texternalsym:$in), 32)>;
+def : Pat<(VEhi texternalsym:$in), (LEASLzii 0, 0, texternalsym:$in)>;
+def : Pat<(VElo texternalsym:$in),
+ (ANDrm0 (LEAzii 0, 0, texternalsym:$in), 32)>;
def : Pat<(add (VEhi texternalsym:$in1), (VElo texternalsym:$in2)),
- (LEASLrzi (ANDrm0 (LEAzzi texternalsym:$in2), 32),
+ (LEASLrii (ANDrm0 (LEAzii 0, 0, texternalsym:$in2), 32), 0,
(texternalsym:$in1))>;
// Calls
diff --git a/llvm/lib/Target/VE/VERegisterInfo.cpp b/llvm/lib/Target/VE/VERegisterInfo.cpp
index 74ccc70d2ed8..e37a8c838513 100644
--- a/llvm/lib/Target/VE/VERegisterInfo.cpp
+++ b/llvm/lib/Target/VE/VERegisterInfo.cpp
@@ -90,7 +90,7 @@ static void replaceFI(MachineFunction &MF, MachineBasicBlock::iterator II,
// VE has 32 bit offset field, so no need to expand a target instruction.
// Directly encode it.
MI.getOperand(FIOperandNum).ChangeToRegister(FramePtr, false);
- MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
+ MI.getOperand(FIOperandNum + 2).ChangeToImmediate(Offset);
}
void VERegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
@@ -108,7 +108,7 @@ void VERegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
int Offset;
Offset = TFI->getFrameIndexReference(MF, FrameIndex, FrameReg);
- Offset += MI.getOperand(FIOperandNum + 1).getImm();
+ Offset += MI.getOperand(FIOperandNum + 2).getImm();
replaceFI(MF, II, MI, dl, FIOperandNum, Offset, FrameReg);
}
diff --git a/llvm/test/CodeGen/VE/addition.ll b/llvm/test/CodeGen/VE/addition.ll
index 87b64dd0ba27..e8f406e494f3 100644
--- a/llvm/test/CodeGen/VE/addition.ll
+++ b/llvm/test/CodeGen/VE/addition.ll
@@ -81,7 +81,7 @@ define i64 @func9(i64 %0, i64 %1) {
define signext i8 @func13(i8 signext %0) {
; CHECK-LABEL: func13:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, 5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, 5, %s0
; CHECK-NEXT: sla.w.sx %s0, %s0, 24
; CHECK-NEXT: sra.w.sx %s0, %s0, 24
; CHECK-NEXT: or %s11, 0, %s9
@@ -92,7 +92,7 @@ define signext i8 @func13(i8 signext %0) {
define signext i16 @func14(i16 signext %0) {
; CHECK-LABEL: func14:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, 5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, 5, %s0
; CHECK-NEXT: sla.w.sx %s0, %s0, 16
; CHECK-NEXT: sra.w.sx %s0, %s0, 16
; CHECK-NEXT: or %s11, 0, %s9
@@ -103,7 +103,7 @@ define signext i16 @func14(i16 signext %0) {
define i32 @func15(i32 %0) {
; CHECK-LABEL: func15:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, 5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, 5, %s0
; CHECK-NEXT: or %s11, 0, %s9
%2 = add nsw i32 %0, 5
ret i32 %2
@@ -112,7 +112,7 @@ define i32 @func15(i32 %0) {
define i64 @func16(i64 %0) {
; CHECK-LABEL: func16:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, 5(%s0)
+; CHECK-NEXT: lea %s0, 5(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = add nsw i64 %0, 5
ret i64 %2
@@ -121,7 +121,7 @@ define i64 @func16(i64 %0) {
define zeroext i8 @func18(i8 zeroext %0) {
; CHECK-LABEL: func18:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, 5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, 5, %s0
; CHECK-NEXT: and %s0, %s0, (56)0
; CHECK-NEXT: or %s11, 0, %s9
%2 = add i8 %0, 5
@@ -131,7 +131,7 @@ define zeroext i8 @func18(i8 zeroext %0) {
define zeroext i16 @func19(i16 zeroext %0) {
; CHECK-LABEL: func19:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, 5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, 5, %s0
; CHECK-NEXT: and %s0, %s0, (48)0
; CHECK-NEXT: or %s11, 0, %s9
%2 = add i16 %0, 5
@@ -141,7 +141,7 @@ define zeroext i16 @func19(i16 zeroext %0) {
define i32 @func20(i32 %0) {
; CHECK-LABEL: func20:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, 5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, 5, %s0
; CHECK-NEXT: or %s11, 0, %s9
%2 = add i32 %0, 5
ret i32 %2
@@ -150,7 +150,7 @@ define i32 @func20(i32 %0) {
define i64 @func21(i64 %0) {
; CHECK-LABEL: func21:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, 5(%s0)
+; CHECK-NEXT: lea %s0, 5(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = add i64 %0, 5
ret i64 %2
diff --git a/llvm/test/CodeGen/VE/branch1.ll b/llvm/test/CodeGen/VE/branch1.ll
index a6fab7ae0fc6..baf23cd9430e 100644
--- a/llvm/test/CodeGen/VE/branch1.ll
+++ b/llvm/test/CodeGen/VE/branch1.ll
@@ -7,7 +7,7 @@ define signext i8 @func1(i8 signext %a, i8 signext %b) {
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB0_3
@@ -40,7 +40,7 @@ define i32 @func2(i16 signext %a, i16 signext %b) {
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB1_3
@@ -68,7 +68,7 @@ define i32 @func3(i32 %a, i32 %b) {
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB2_3
@@ -96,7 +96,7 @@ define i32 @func4(i64 %a, i64 %b) {
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB3_3
@@ -125,7 +125,7 @@ define i32 @func5(i8 zeroext %a, i8 zeroext %b) {
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB4_3
@@ -154,7 +154,7 @@ define i32 @func6(i16 zeroext %a, i16 zeroext %b) {
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB5_3
@@ -183,7 +183,7 @@ define i32 @func7(i32 %a, i32 %b) {
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB6_3
@@ -211,7 +211,7 @@ define i32 @func8(float %a, float %b) {
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB7_3
@@ -239,7 +239,7 @@ define i32 @func9(double %a, double %b) {
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB8_3
@@ -268,7 +268,7 @@ define i32 @func10(double %a, double %b) {
; CHECK-NEXT: # %bb.2:
; CHECK-NEXT: lea %s0, ret@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, ret@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, ret@hi(, %s0)
; CHECK-NEXT: or %s0, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: br.l .LBB9_3
diff --git a/llvm/test/CodeGen/VE/call.ll b/llvm/test/CodeGen/VE/call.ll
index c03f5bcf84be..4f0c7b50eb75 100644
--- a/llvm/test/CodeGen/VE/call.ll
+++ b/llvm/test/CodeGen/VE/call.ll
@@ -5,7 +5,7 @@ define i32 @sample_call() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, sample_add@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, sample_add@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, sample_add@hi(, %s0)
; CHECK-NEXT: or %s0, 1, (0)1
; CHECK-NEXT: or %s1, 2, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
@@ -20,11 +20,11 @@ define i32 @stack_call_int() {
; CHECK-LABEL: stack_call_int:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: or %s0, 10, (0)1
-; CHECK-NEXT: stl %s0, 248(,%s11)
+; CHECK-NEXT: stl %s0, 248(, %s11)
; CHECK-NEXT: or %s34, 9, (0)1
; CHECK-NEXT: lea %s0, stack_callee_int@lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, stack_callee_int@hi(%s0)
+; CHECK-NEXT: lea.sl %s12, stack_callee_int@hi(, %s0)
; CHECK-NEXT: or %s0, 1, (0)1
; CHECK-NEXT: or %s1, 2, (0)1
; CHECK-NEXT: or %s2, 3, (0)1
@@ -33,7 +33,7 @@ define i32 @stack_call_int() {
; CHECK-NEXT: or %s5, 6, (0)1
; CHECK-NEXT: or %s6, 7, (0)1
; CHECK-NEXT: or %s7, 8, (0)1
-; CHECK-NEXT: stl %s34, 240(,%s11)
+; CHECK-NEXT: stl %s34, 240(, %s11)
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: or %s11, 0, %s9
%r = tail call i32 @stack_callee_int(i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10)
@@ -46,11 +46,11 @@ define i32 @stack_call_int_szext() {
; CHECK-LABEL: stack_call_int_szext:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: or %s0, -1, (0)1
-; CHECK-NEXT: stl %s0, 248(,%s11)
+; CHECK-NEXT: stl %s0, 248(, %s11)
; CHECK-NEXT: lea %s34, 65535
; CHECK-NEXT: lea %s1, stack_callee_int_szext@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s12, stack_callee_int_szext@hi(%s1)
+; CHECK-NEXT: lea.sl %s12, stack_callee_int_szext@hi(, %s1)
; CHECK-NEXT: lea %s1, 255
; CHECK-NEXT: or %s2, 3, (0)1
; CHECK-NEXT: or %s3, 4, (0)1
@@ -58,7 +58,7 @@ define i32 @stack_call_int_szext() {
; CHECK-NEXT: or %s5, 6, (0)1
; CHECK-NEXT: or %s6, 7, (0)1
; CHECK-NEXT: or %s7, 8, (0)1
-; CHECK-NEXT: stl %s34, 240(,%s11)
+; CHECK-NEXT: stl %s34, 240(, %s11)
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: or %s11, 0, %s9
%r = tail call i32 @stack_callee_int_szext(i1 -1, i8 -1, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i16 -1, i8 -1)
@@ -71,11 +71,11 @@ define float @stack_call_float() {
; CHECK-LABEL: stack_call_float:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, 1092616192
-; CHECK-NEXT: stl %s0, 252(,%s11)
+; CHECK-NEXT: stl %s0, 252(, %s11)
; CHECK-NEXT: lea %s0, 1091567616
; CHECK-NEXT: lea %s1, stack_callee_float@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s12, stack_callee_float@hi(%s1)
+; CHECK-NEXT: lea.sl %s12, stack_callee_float@hi(, %s1)
; CHECK-NEXT: lea.sl %s1, 1065353216
; CHECK-NEXT: lea.sl %s2, 1073741824
; CHECK-NEXT: lea.sl %s3, 1077936128
@@ -84,7 +84,7 @@ define float @stack_call_float() {
; CHECK-NEXT: lea.sl %s6, 1086324736
; CHECK-NEXT: lea.sl %s7, 1088421888
; CHECK-NEXT: lea.sl %s34, 1090519040
-; CHECK-NEXT: stl %s0, 244(,%s11)
+; CHECK-NEXT: stl %s0, 244(, %s11)
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: or %s1, 0, %s2
; CHECK-NEXT: or %s2, 0, %s3
@@ -104,11 +104,11 @@ declare float @stack_callee_float(float, float, float, float, float, float, floa
define float @stack_call_float2(float %p0) {
; CHECK-LABEL: stack_call_float2:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s0, 252(,%s11)
+; CHECK-NEXT: stu %s0, 252(, %s11)
; CHECK-NEXT: lea %s1, stack_callee_float@lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s12, stack_callee_float@hi(%s1)
-; CHECK-NEXT: stu %s0, 244(,%s11)
+; CHECK-NEXT: lea.sl %s12, stack_callee_float@hi(, %s1)
+; CHECK-NEXT: stu %s0, 244(, %s11)
; CHECK-NEXT: or %s1, 0, %s0
; CHECK-NEXT: or %s2, 0, %s0
; CHECK-NEXT: or %s3, 0, %s0
diff --git a/llvm/test/CodeGen/VE/callee.ll b/llvm/test/CodeGen/VE/callee.ll
index 08d271c6f9c5..b1dee4806ed0 100644
--- a/llvm/test/CodeGen/VE/callee.ll
+++ b/llvm/test/CodeGen/VE/callee.ll
@@ -3,7 +3,7 @@
define i32 @stack_stack_arg_i32_r9(i1 %0, i8 %1, i16 %2, i32 %3, i64 %4, i32 %5, i32 %6, i32 %7, i32 %8, i32 %9) {
; CHECK-LABEL: stack_stack_arg_i32_r9:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 424(,%s11)
+; CHECK-NEXT: ldl.sx %s0, 424(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
ret i32 %9
}
@@ -11,7 +11,7 @@ define i32 @stack_stack_arg_i32_r9(i1 %0, i8 %1, i16 %2, i32 %3, i64 %4, i32 %5,
define i64 @stack_stack_arg_i64_r9(i1 %0, i8 %1, i16 %2, i32 %3, i64 %4, i64 %5, i64 %6, i64 %7, i64 %8, i64 %9) {
; CHECK-LABEL: stack_stack_arg_i64_r9:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 424(,%s11)
+; CHECK-NEXT: ld %s0, 424(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
ret i64 %9
}
@@ -19,7 +19,7 @@ define i64 @stack_stack_arg_i64_r9(i1 %0, i8 %1, i16 %2, i32 %3, i64 %4, i64 %5,
define float @stack_stack_arg_f32_r9(float %p0, float %p1, float %p2, float %p3, float %p4, float %p5, float %p6, float %p7, float %s0, float %s1) {
; CHECK-LABEL: stack_stack_arg_f32_r9:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, 428(,%s11)
+; CHECK-NEXT: ldu %s0, 428(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
ret float %s1
}
@@ -27,7 +27,7 @@ define float @stack_stack_arg_f32_r9(float %p0, float %p1, float %p2, float %p3,
define i32 @stack_stack_arg_i32f32_r8(i32 %p0, float %p1, i32 %p2, float %p3, i32 %p4, float %p5, i32 %p6, float %p7, i32 %s0, float %s1) {
; CHECK-LABEL: stack_stack_arg_i32f32_r8:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 416(,%s11)
+; CHECK-NEXT: ldl.sx %s0, 416(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
ret i32 %s0
}
@@ -35,7 +35,7 @@ define i32 @stack_stack_arg_i32f32_r8(i32 %p0, float %p1, i32 %p2, float %p3, i3
define float @stack_stack_arg_i32f32_r9(i32 %p0, float %p1, i32 %p2, float %p3, i32 %p4, float %p5, i32 %p6, float %p7, i32 %s0, float %s1) {
; CHECK-LABEL: stack_stack_arg_i32f32_r9:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, 428(,%s11)
+; CHECK-NEXT: ldu %s0, 428(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
ret float %s1
}
diff --git a/llvm/test/CodeGen/VE/callstruct.ll b/llvm/test/CodeGen/VE/callstruct.ll
index a76a9511f73f..c1d9d9f0d27f 100644
--- a/llvm/test/CodeGen/VE/callstruct.ll
+++ b/llvm/test/CodeGen/VE/callstruct.ll
@@ -8,8 +8,8 @@
define void @fun(%struct.a* noalias nocapture sret %a, i32 %p1, i32 %p2) {
; CHECK-LABEL: fun:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s1, (,%s0)
-; CHECK-NEXT: stl %s2, 4(,%s0)
+; CHECK-NEXT: stl %s1, (, %s0)
+; CHECK-NEXT: stl %s2, 4(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%a.zero = getelementptr inbounds %struct.a, %struct.a* %a, i64 0, i32 0
store i32 %p1, i32* %a.zero, align 4
@@ -24,16 +24,16 @@ define void @caller() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, callee at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, callee at hi(%s0)
-; CHECK-NEXT: lea %s0,-8(,%s9)
+; CHECK-NEXT: lea.sl %s12, callee at hi(, %s0)
+; CHECK-NEXT: lea %s0, -8(, %s9)
; CHECK-NEXT: or %s1, 3, (0)1
; CHECK-NEXT: or %s2, 4, (0)1
; CHECK-NEXT: bsic %lr, (,%s12)
-; CHECK-NEXT: ld %s0, -8(,%s9)
+; CHECK-NEXT: ld %s0, -8(, %s9)
; CHECK-NEXT: lea %s1, A at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, A at hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, A at hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i64, align 8
%a.bc = bitcast i64* %a to %struct.a*
diff --git a/llvm/test/CodeGen/VE/cast.ll b/llvm/test/CodeGen/VE/cast.ll
index d9fa2d56bb10..7c641679aeaa 100644
--- a/llvm/test/CodeGen/VE/cast.ll
+++ b/llvm/test/CodeGen/VE/cast.ll
@@ -422,7 +422,7 @@ define double @ull2d(i64 %x) {
; CHECK-NEXT: lea.sl %s2, 1160773632
; CHECK-NEXT: or %s1, %s1, %s2
; CHECK-NEXT: lea %s2, 1048576
-; CHECK-NEXT: lea.sl %s2, -986710016(%s2)
+; CHECK-NEXT: lea.sl %s2, -986710016(, %s2)
; CHECK-NEXT: fadd.d %s1, %s1, %s2
; CHECK-NEXT: lea %s2, -1
; CHECK-NEXT: and %s2, %s2, (32)0
diff --git a/llvm/test/CodeGen/VE/constants.ll b/llvm/test/CodeGen/VE/constants.ll
index 3c899156af1d..366b42f49393 100644
--- a/llvm/test/CodeGen/VE/constants.ll
+++ b/llvm/test/CodeGen/VE/constants.ll
@@ -247,7 +247,7 @@ define i64 @p15032385535i64() {
; CHECK-LABEL: p15032385535i64:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, 2147483647
-; CHECK-NEXT: lea.sl %s0, 3(%s0)
+; CHECK-NEXT: lea.sl %s0, 3(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
ret i64 15032385535
}
@@ -256,7 +256,7 @@ define signext i64 @p15032385535si64() {
; CHECK-LABEL: p15032385535si64:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, 2147483647
-; CHECK-NEXT: lea.sl %s0, 3(%s0)
+; CHECK-NEXT: lea.sl %s0, 3(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
ret i64 15032385535
}
@@ -265,7 +265,7 @@ define zeroext i64 @p15032385535zi64() {
; CHECK-LABEL: p15032385535zi64:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, 2147483647
-; CHECK-NEXT: lea.sl %s0, 3(%s0)
+; CHECK-NEXT: lea.sl %s0, 3(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
ret i64 15032385535
}
@@ -275,7 +275,7 @@ define i64 @p15032385536i64() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, -2147483648
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, 3(%s0)
+; CHECK-NEXT: lea.sl %s0, 3(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
ret i64 15032385536
}
@@ -285,7 +285,7 @@ define signext i64 @p15032385536si64() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, -2147483648
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, 3(%s0)
+; CHECK-NEXT: lea.sl %s0, 3(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
ret i64 15032385536
}
@@ -295,7 +295,7 @@ define zeroext i64 @p15032385536zi64() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, -2147483648
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, 3(%s0)
+; CHECK-NEXT: lea.sl %s0, 3(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
ret i64 15032385536
}
@@ -330,7 +330,7 @@ define double @p2p3f64() {
; CHECK-LABEL: p2p3f64:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, 1717986918
-; CHECK-NEXT: lea.sl %s0, 1073899110(%s0)
+; CHECK-NEXT: lea.sl %s0, 1073899110(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
ret double 2.3
}
@@ -349,7 +349,7 @@ define double @p128p3f64() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, -1717986918
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, 1080035737(%s0)
+; CHECK-NEXT: lea.sl %s0, 1080035737(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
ret double 128.3
}
diff --git a/llvm/test/CodeGen/VE/ctlz.ll b/llvm/test/CodeGen/VE/ctlz.ll
index 0d0b2c3f6b0a..de44790014a0 100644
--- a/llvm/test/CodeGen/VE/ctlz.ll
+++ b/llvm/test/CodeGen/VE/ctlz.ll
@@ -31,7 +31,7 @@ define i16 @func3(i16 %p) {
; CHECK-NEXT: and %s0, %s0, (48)0
; CHECK-NEXT: sll %s0, %s0, 32
; CHECK-NEXT: ldz %s0, %s0
-; CHECK-NEXT: lea %s0, -16(%s0)
+; CHECK-NEXT: adds.w.sx %s0, -16, %s0
; CHECK-NEXT: or %s11, 0, %s9
%r = tail call i16 @llvm.ctlz.i16(i16 %p, i1 true)
ret i16 %r
@@ -45,7 +45,7 @@ define i8 @func4(i8 %p) {
; CHECK-NEXT: and %s0, %s0, (56)0
; CHECK-NEXT: sll %s0, %s0, 32
; CHECK-NEXT: ldz %s0, %s0
-; CHECK-NEXT: lea %s0, -24(%s0)
+; CHECK-NEXT: adds.w.sx %s0, -24, %s0
; CHECK-NEXT: or %s11, 0, %s9
%r = tail call i8 @llvm.ctlz.i8(i8 %p, i1 true)
ret i8 %r
diff --git a/llvm/test/CodeGen/VE/cttz.ll b/llvm/test/CodeGen/VE/cttz.ll
index 0b0399523ab7..82df4ee109c5 100644
--- a/llvm/test/CodeGen/VE/cttz.ll
+++ b/llvm/test/CodeGen/VE/cttz.ll
@@ -3,7 +3,7 @@
define i64 @func1(i64 %p) {
; CHECK-LABEL: func1:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s1, -1(%s0)
+; CHECK-NEXT: lea %s1, -1(, %s0)
; CHECK-NEXT: xor %s0, -1, %s0
; CHECK-NEXT: and %s0, %s0, %s1
; CHECK-NEXT: pcnt %s0, %s0
@@ -17,7 +17,7 @@ declare i64 @llvm.cttz.i64(i64, i1)
define i32 @func2(i32 %p) {
; CHECK-LABEL: func2:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s1, -1(%s0)
+; CHECK-NEXT: adds.w.sx %s1, -1, %s0
; CHECK-NEXT: xor %s0, -1, %s0
; CHECK-NEXT: and %s0, %s0, %s1
; CHECK-NEXT: and %s0, %s0, (32)0
@@ -33,7 +33,7 @@ declare i32 @llvm.cttz.i32(i32, i1)
define i16 @func3(i16 %p) {
; CHECK-LABEL: func3:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s1, -1(%s0)
+; CHECK-NEXT: adds.w.sx %s1, -1, %s0
; CHECK-NEXT: xor %s0, -1, %s0
; CHECK-NEXT: and %s0, %s0, %s1
; CHECK-NEXT: and %s0, %s0, (32)0
@@ -49,7 +49,7 @@ declare i16 @llvm.cttz.i16(i16, i1)
define i8 @func4(i8 %p) {
; CHECK-LABEL: func4:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s1, -1(%s0)
+; CHECK-NEXT: adds.w.sx %s1, -1, %s0
; CHECK-NEXT: xor %s0, -1, %s0
; CHECK-NEXT: and %s0, %s0, %s1
; CHECK-NEXT: and %s0, %s0, (32)0
diff --git a/llvm/test/CodeGen/VE/fp_add.ll b/llvm/test/CodeGen/VE/fp_add.ll
index 03e32e8f3b03..0c8df7c8f6aa 100644
--- a/llvm/test/CodeGen/VE/fp_add.ll
+++ b/llvm/test/CodeGen/VE/fp_add.ll
@@ -55,7 +55,7 @@ define double @func8(double %a) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, -1
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, 2146435071(%s1)
+; CHECK-NEXT: lea.sl %s1, 2146435071(, %s1)
; CHECK-NEXT: fadd.d %s0, %s0, %s1
; CHECK-NEXT: or %s11, 0, %s9
%r = fadd double %a, 0x7FEFFFFFFFFFFFFF
diff --git a/llvm/test/CodeGen/VE/fp_div.ll b/llvm/test/CodeGen/VE/fp_div.ll
index 2a8c7dfdc773..912ffa05a572 100644
--- a/llvm/test/CodeGen/VE/fp_div.ll
+++ b/llvm/test/CodeGen/VE/fp_div.ll
@@ -55,7 +55,7 @@ define double @func8(double %a) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, -1
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, 2146435071(%s1)
+; CHECK-NEXT: lea.sl %s1, 2146435071(, %s1)
; CHECK-NEXT: fdiv.d %s0, %s0, %s1
; CHECK-NEXT: or %s11, 0, %s9
%r = fdiv double %a, 0x7FEFFFFFFFFFFFFF
diff --git a/llvm/test/CodeGen/VE/fp_extload_truncstore.ll b/llvm/test/CodeGen/VE/fp_extload_truncstore.ll
index bedccef86bf7..9487e1ca9eb4 100644
--- a/llvm/test/CodeGen/VE/fp_extload_truncstore.ll
+++ b/llvm/test/CodeGen/VE/fp_extload_truncstore.ll
@@ -9,10 +9,10 @@ declare double @llvm.convert.from.fp16.f64(i16 %a)
define float @func_i16fp32(i16* %a) {
; CHECK-LABEL: func_i16fp32:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: lea %s1, __gnu_h2f_ieee at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee at hi(%s1)
+; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee at hi(, %s1)
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: or %s11, 0, %s9
%a.val = load i16, i16* %a, align 4
@@ -23,10 +23,10 @@ define float @func_i16fp32(i16* %a) {
define double @func_i16fp64(i16* %a) {
; CHECK-LABEL: func_i16fp64:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: lea %s1, __gnu_h2f_ieee at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee at hi(%s1)
+; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee at hi(, %s1)
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: cvt.d.s %s0, %s0
; CHECK-NEXT: or %s11, 0, %s9
@@ -38,10 +38,10 @@ define double @func_i16fp64(i16* %a) {
define float @func_fp16fp32(half* %a) {
; CHECK-LABEL: func_fp16fp32:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: lea %s1, __gnu_h2f_ieee at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee at hi(%s1)
+; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee at hi(, %s1)
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: or %s11, 0, %s9
%a.val = load half, half* %a, align 4
@@ -52,10 +52,10 @@ define float @func_fp16fp32(half* %a) {
define double @func_fp16fp64(half* %a) {
; CHECK-LABEL: func_fp16fp64:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: lea %s1, __gnu_h2f_ieee at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee at hi(%s1)
+; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee at hi(, %s1)
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: cvt.d.s %s0, %s0
; CHECK-NEXT: or %s11, 0, %s9
@@ -67,15 +67,15 @@ define double @func_fp16fp64(half* %a) {
define void @func_fp32i16(i16* %fl.ptr, float %val) {
; CHECK-LABEL: func_fp32i16:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s18, 48(,%s9) # 8-byte Folded Spill
+; CHECK-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill
; CHECK-NEXT: or %s18, 0, %s0
; CHECK-NEXT: lea %s0, __gnu_f2h_ieee at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, __gnu_f2h_ieee at hi(%s0)
+; CHECK-NEXT: lea.sl %s12, __gnu_f2h_ieee at hi(, %s0)
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: bsic %lr, (,%s12)
-; CHECK-NEXT: st2b %s0, (,%s18)
-; CHECK-NEXT: ld %s18, 48(,%s9) # 8-byte Folded Reload
+; CHECK-NEXT: st2b %s0, (, %s18)
+; CHECK-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload
; CHECK-NEXT: or %s11, 0, %s9
%val.asf = call i16 @llvm.convert.to.fp16.f32(float %val)
store i16 %val.asf, i16* %fl.ptr
@@ -85,23 +85,23 @@ define void @func_fp32i16(i16* %fl.ptr, float %val) {
define half @func_fp32fp16(half* %fl.ptr, float %a) {
; CHECK-LABEL: func_fp32fp16:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s18, 48(,%s9) # 8-byte Folded Spill
-; CHECK-NEXT: st %s19, 56(,%s9) # 8-byte Folded Spill
+; CHECK-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill
+; CHECK-NEXT: st %s19, 56(, %s9) # 8-byte Folded Spill
; CHECK-NEXT: or %s18, 0, %s0
; CHECK-NEXT: lea %s0, __gnu_f2h_ieee at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, __gnu_f2h_ieee at hi(%s0)
+; CHECK-NEXT: lea.sl %s12, __gnu_f2h_ieee at hi(, %s0)
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: or %s19, 0, %s0
; CHECK-NEXT: lea %s0, __gnu_h2f_ieee at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee at hi(%s0)
+; CHECK-NEXT: lea.sl %s12, __gnu_h2f_ieee at hi(, %s0)
; CHECK-NEXT: or %s0, 0, %s19
; CHECK-NEXT: bsic %lr, (,%s12)
-; CHECK-NEXT: st2b %s19, (,%s18)
-; CHECK-NEXT: ld %s19, 56(,%s9) # 8-byte Folded Reload
-; CHECK-NEXT: ld %s18, 48(,%s9) # 8-byte Folded Reload
+; CHECK-NEXT: st2b %s19, (, %s18)
+; CHECK-NEXT: ld %s19, 56(, %s9) # 8-byte Folded Reload
+; CHECK-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload
; CHECK-NEXT: or %s11, 0, %s9
%a.asd = fptrunc float %a to half
store half %a.asd, half* %fl.ptr
@@ -111,7 +111,7 @@ define half @func_fp32fp16(half* %fl.ptr, float %a) {
define double @func_fp32fp64(float* %a) {
; CHECK-LABEL: func_fp32fp64:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: ldu %s0, (, %s0)
; CHECK-NEXT: cvt.d.s %s0, %s0
; CHECK-NEXT: or %s11, 0, %s9
%a.val = load float, float* %a, align 4
@@ -122,15 +122,15 @@ define double @func_fp32fp64(float* %a) {
define void @func_fp64i16(i16* %fl.ptr, double %val) {
; CHECK-LABEL: func_fp64i16:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s18, 48(,%s9) # 8-byte Folded Spill
+; CHECK-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill
; CHECK-NEXT: or %s18, 0, %s0
; CHECK-NEXT: lea %s0, __truncdfhf2 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, __truncdfhf2 at hi(%s0)
+; CHECK-NEXT: lea.sl %s12, __truncdfhf2 at hi(, %s0)
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: bsic %lr, (,%s12)
-; CHECK-NEXT: st2b %s0, (,%s18)
-; CHECK-NEXT: ld %s18, 48(,%s9) # 8-byte Folded Reload
+; CHECK-NEXT: st2b %s0, (, %s18)
+; CHECK-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload
; CHECK-NEXT: or %s11, 0, %s9
%val.asf = call i16 @llvm.convert.to.fp16.f64(double %val)
store i16 %val.asf, i16* %fl.ptr
@@ -140,15 +140,15 @@ define void @func_fp64i16(i16* %fl.ptr, double %val) {
define void @func_fp64fp16(half* %fl.ptr, double %val) {
; CHECK-LABEL: func_fp64fp16:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s18, 48(,%s9) # 8-byte Folded Spill
+; CHECK-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill
; CHECK-NEXT: or %s18, 0, %s0
; CHECK-NEXT: lea %s0, __truncdfhf2 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, __truncdfhf2 at hi(%s0)
+; CHECK-NEXT: lea.sl %s12, __truncdfhf2 at hi(, %s0)
; CHECK-NEXT: or %s0, 0, %s1
; CHECK-NEXT: bsic %lr, (,%s12)
-; CHECK-NEXT: st2b %s0, (,%s18)
-; CHECK-NEXT: ld %s18, 48(,%s9) # 8-byte Folded Reload
+; CHECK-NEXT: st2b %s0, (, %s18)
+; CHECK-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload
; CHECK-NEXT: or %s11, 0, %s9
%val.asf = fptrunc double %val to half
store half %val.asf, half* %fl.ptr
@@ -159,7 +159,7 @@ define void @func_fp64fp32(float* %fl.ptr, double %val) {
; CHECK-LABEL: func_fp64fp32:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: cvt.s.d %s1, %s1
-; CHECK-NEXT: stu %s1, (,%s0)
+; CHECK-NEXT: stu %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%val.asf = fptrunc double %val to float
store float %val.asf, float* %fl.ptr
diff --git a/llvm/test/CodeGen/VE/fp_mul.ll b/llvm/test/CodeGen/VE/fp_mul.ll
index 047c66d90c74..4a6c740eb7c8 100644
--- a/llvm/test/CodeGen/VE/fp_mul.ll
+++ b/llvm/test/CodeGen/VE/fp_mul.ll
@@ -55,7 +55,7 @@ define double @func8(double %a) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, -1
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, 2146435071(%s1)
+; CHECK-NEXT: lea.sl %s1, 2146435071(, %s1)
; CHECK-NEXT: fmul.d %s0, %s0, %s1
; CHECK-NEXT: or %s11, 0, %s9
%r = fmul double %a, 0x7FEFFFFFFFFFFFFF
diff --git a/llvm/test/CodeGen/VE/fp_sub.ll b/llvm/test/CodeGen/VE/fp_sub.ll
index 6a4b803f5691..18e97e46aca8 100644
--- a/llvm/test/CodeGen/VE/fp_sub.ll
+++ b/llvm/test/CodeGen/VE/fp_sub.ll
@@ -55,7 +55,7 @@ define double @func8(double %a) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, -1
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, -1048577(%s1)
+; CHECK-NEXT: lea.sl %s1, -1048577(, %s1)
; CHECK-NEXT: fadd.d %s0, %s0, %s1
; CHECK-NEXT: or %s11, 0, %s9
%r = fadd double %a, 0xFFEFFFFFFFFFFFFF
diff --git a/llvm/test/CodeGen/VE/int_to_fp.ll b/llvm/test/CodeGen/VE/int_to_fp.ll
index 61b6ab69a5bc..a9b96c389412 100644
--- a/llvm/test/CodeGen/VE/int_to_fp.ll
+++ b/llvm/test/CodeGen/VE/int_to_fp.ll
@@ -188,7 +188,7 @@ define double @ul2d(i64 %a) {
; CHECK-NEXT: lea.sl %s2, 1160773632
; CHECK-NEXT: or %s1, %s1, %s2
; CHECK-NEXT: lea %s2, 1048576
-; CHECK-NEXT: lea.sl %s2, -986710016(%s2)
+; CHECK-NEXT: lea.sl %s2, -986710016(, %s2)
; CHECK-NEXT: fadd.d %s1, %s1, %s2
; CHECK-NEXT: lea %s2, -1
; CHECK-NEXT: and %s2, %s2, (32)0
diff --git a/llvm/test/CodeGen/VE/load-align1.ll b/llvm/test/CodeGen/VE/load-align1.ll
index eb4be9c6154c..4a90a30ea46a 100644
--- a/llvm/test/CodeGen/VE/load-align1.ll
+++ b/llvm/test/CodeGen/VE/load-align1.ll
@@ -11,7 +11,7 @@
define double @loadf64stk() {
; CHECK-LABEL: loadf64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: ld %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 1
%1 = load double, double* %addr, align 1
@@ -22,7 +22,7 @@ define double @loadf64stk() {
define float @loadf32stk() {
; CHECK-LABEL: loadf32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, 188(,%s11)
+; CHECK-NEXT: ldu %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 1
%1 = load float, float* %addr, align 1
@@ -33,7 +33,7 @@ define float @loadf32stk() {
define i64 @loadi64stk() {
; CHECK-LABEL: loadi64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: ld %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 1
%1 = load i64, i64* %addr, align 1
@@ -44,7 +44,7 @@ define i64 @loadi64stk() {
define i32 @loadi32stk() {
; CHECK-LABEL: loadi32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 188(,%s11)
+; CHECK-NEXT: ldl.sx %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 1
%1 = load i32, i32* %addr, align 1
@@ -55,7 +55,7 @@ define i32 @loadi32stk() {
define i16 @loadi16stk() {
; CHECK-LABEL: loadi16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 1
%1 = load i16, i16* %addr, align 1
@@ -66,7 +66,7 @@ define i16 @loadi16stk() {
define i8 @loadi8stk() {
; CHECK-LABEL: loadi8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 1
%1 = load i8, i8* %addr, align 1
@@ -79,8 +79,8 @@ define double @loadf64com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf64 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf64 at hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf64 at hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load double, double* @vf64, align 1
ret double %1
@@ -92,8 +92,8 @@ define float @loadf32com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf32 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf32 at hi(%s0)
-; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf32 at hi(, %s0)
+; CHECK-NEXT: ldu %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load float, float* @vf32, align 1
ret float %1
@@ -105,8 +105,8 @@ define i64 @loadi64com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi64 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi64 at hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi64 at hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i64, i64* @vi64, align 1
ret i64 %1
@@ -118,8 +118,8 @@ define i32 @loadi32com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi32 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi32 at hi(%s0)
-; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi32 at hi(, %s0)
+; CHECK-NEXT: ldl.sx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i32, i32* @vi32, align 1
ret i32 %1
@@ -131,8 +131,8 @@ define i16 @loadi16com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi16 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi16 at hi(%s0)
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi16 at hi(, %s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i16, i16* @vi16, align 1
ret i16 %1
@@ -144,8 +144,8 @@ define i8 @loadi8com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi8 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi8 at hi(%s0)
-; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi8 at hi(, %s0)
+; CHECK-NEXT: ld1b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i8, i8* @vi8, align 1
ret i8 %1
diff --git a/llvm/test/CodeGen/VE/load-align2.ll b/llvm/test/CodeGen/VE/load-align2.ll
index f0c545a77838..60ea2aa8909a 100644
--- a/llvm/test/CodeGen/VE/load-align2.ll
+++ b/llvm/test/CodeGen/VE/load-align2.ll
@@ -11,7 +11,7 @@
define double @loadf64stk() {
; CHECK-LABEL: loadf64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: ld %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 2
%1 = load double, double* %addr, align 2
@@ -22,7 +22,7 @@ define double @loadf64stk() {
define float @loadf32stk() {
; CHECK-LABEL: loadf32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, 188(,%s11)
+; CHECK-NEXT: ldu %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 2
%1 = load float, float* %addr, align 2
@@ -33,7 +33,7 @@ define float @loadf32stk() {
define i64 @loadi64stk() {
; CHECK-LABEL: loadi64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: ld %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 2
%1 = load i64, i64* %addr, align 2
@@ -44,7 +44,7 @@ define i64 @loadi64stk() {
define i32 @loadi32stk() {
; CHECK-LABEL: loadi32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 188(,%s11)
+; CHECK-NEXT: ldl.sx %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 2
%1 = load i32, i32* %addr, align 2
@@ -55,7 +55,7 @@ define i32 @loadi32stk() {
define i16 @loadi16stk() {
; CHECK-LABEL: loadi16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 2
%1 = load i16, i16* %addr, align 2
@@ -66,7 +66,7 @@ define i16 @loadi16stk() {
define i8 @loadi8stk() {
; CHECK-LABEL: loadi8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 190(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 2
%1 = load i8, i8* %addr, align 2
@@ -79,8 +79,8 @@ define double @loadf64com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf64 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf64 at hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf64 at hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load double, double* @vf64, align 2
ret double %1
@@ -92,8 +92,8 @@ define float @loadf32com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf32 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf32 at hi(%s0)
-; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf32 at hi(, %s0)
+; CHECK-NEXT: ldu %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load float, float* @vf32, align 2
ret float %1
@@ -105,8 +105,8 @@ define i64 @loadi64com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi64 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi64 at hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi64 at hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i64, i64* @vi64, align 2
ret i64 %1
@@ -118,8 +118,8 @@ define i32 @loadi32com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi32 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi32 at hi(%s0)
-; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi32 at hi(, %s0)
+; CHECK-NEXT: ldl.sx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i32, i32* @vi32, align 2
ret i32 %1
@@ -131,8 +131,8 @@ define i16 @loadi16com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi16 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi16 at hi(%s0)
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi16 at hi(, %s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i16, i16* @vi16, align 2
ret i16 %1
@@ -144,8 +144,8 @@ define i8 @loadi8com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi8 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi8 at hi(%s0)
-; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi8 at hi(, %s0)
+; CHECK-NEXT: ld1b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i8, i8* @vi8, align 2
ret i8 %1
diff --git a/llvm/test/CodeGen/VE/load-align4.ll b/llvm/test/CodeGen/VE/load-align4.ll
index 6f91ce21698b..7269aaa81f55 100644
--- a/llvm/test/CodeGen/VE/load-align4.ll
+++ b/llvm/test/CodeGen/VE/load-align4.ll
@@ -11,7 +11,7 @@
define double @loadf64stk() {
; CHECK-LABEL: loadf64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: ld %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 4
%1 = load double, double* %addr, align 4
@@ -22,7 +22,7 @@ define double @loadf64stk() {
define float @loadf32stk() {
; CHECK-LABEL: loadf32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, 188(,%s11)
+; CHECK-NEXT: ldu %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 4
%1 = load float, float* %addr, align 4
@@ -33,7 +33,7 @@ define float @loadf32stk() {
define i64 @loadi64stk() {
; CHECK-LABEL: loadi64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: ld %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 4
%1 = load i64, i64* %addr, align 4
@@ -44,7 +44,7 @@ define i64 @loadi64stk() {
define i32 @loadi32stk() {
; CHECK-LABEL: loadi32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 188(,%s11)
+; CHECK-NEXT: ldl.sx %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 4
%1 = load i32, i32* %addr, align 4
@@ -55,7 +55,7 @@ define i32 @loadi32stk() {
define i16 @loadi16stk() {
; CHECK-LABEL: loadi16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 188(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 4
%1 = load i16, i16* %addr, align 4
@@ -66,7 +66,7 @@ define i16 @loadi16stk() {
define i8 @loadi8stk() {
; CHECK-LABEL: loadi8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 188(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 4
%1 = load i8, i8* %addr, align 4
@@ -79,8 +79,8 @@ define double @loadf64com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf64 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf64 at hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf64 at hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load double, double* @vf64, align 4
ret double %1
@@ -92,8 +92,8 @@ define float @loadf32com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf32 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf32 at hi(%s0)
-; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf32 at hi(, %s0)
+; CHECK-NEXT: ldu %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load float, float* @vf32, align 4
ret float %1
@@ -105,8 +105,8 @@ define i64 @loadi64com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi64 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi64 at hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi64 at hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i64, i64* @vi64, align 4
ret i64 %1
@@ -118,8 +118,8 @@ define i32 @loadi32com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi32 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi32 at hi(%s0)
-; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi32 at hi(, %s0)
+; CHECK-NEXT: ldl.sx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i32, i32* @vi32, align 4
ret i32 %1
@@ -131,8 +131,8 @@ define i16 @loadi16com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi16 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi16 at hi(%s0)
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi16 at hi(, %s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i16, i16* @vi16, align 4
ret i16 %1
@@ -144,8 +144,8 @@ define i8 @loadi8com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi8 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi8 at hi(%s0)
-; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi8 at hi(, %s0)
+; CHECK-NEXT: ld1b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i8, i8* @vi8, align 4
ret i8 %1
diff --git a/llvm/test/CodeGen/VE/load-align8.ll b/llvm/test/CodeGen/VE/load-align8.ll
index ba0ad3965c14..191b04944156 100644
--- a/llvm/test/CodeGen/VE/load-align8.ll
+++ b/llvm/test/CodeGen/VE/load-align8.ll
@@ -11,7 +11,7 @@
define double @loadf64stk() {
; CHECK-LABEL: loadf64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: ld %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 8
%1 = load double, double* %addr, align 8
@@ -22,7 +22,7 @@ define double @loadf64stk() {
define float @loadf32stk() {
; CHECK-LABEL: loadf32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, 184(,%s11)
+; CHECK-NEXT: ldu %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 8
%1 = load float, float* %addr, align 8
@@ -33,7 +33,7 @@ define float @loadf32stk() {
define i64 @loadi64stk() {
; CHECK-LABEL: loadi64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 184(,%s11)
+; CHECK-NEXT: ld %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 8
%1 = load i64, i64* %addr, align 8
@@ -44,7 +44,7 @@ define i64 @loadi64stk() {
define i32 @loadi32stk() {
; CHECK-LABEL: loadi32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 184(,%s11)
+; CHECK-NEXT: ldl.sx %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 8
%1 = load i32, i32* %addr, align 8
@@ -55,7 +55,7 @@ define i32 @loadi32stk() {
define i16 @loadi16stk() {
; CHECK-LABEL: loadi16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 184(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 8
%1 = load i16, i16* %addr, align 8
@@ -66,7 +66,7 @@ define i16 @loadi16stk() {
define i8 @loadi8stk() {
; CHECK-LABEL: loadi8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 184(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 8
%1 = load i8, i8* %addr, align 8
@@ -79,8 +79,8 @@ define double @loadf64com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf64 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf64 at hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf64 at hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load double, double* @vf64, align 8
ret double %1
@@ -92,8 +92,8 @@ define float @loadf32com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf32 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf32 at hi(%s0)
-; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf32 at hi(, %s0)
+; CHECK-NEXT: ldu %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load float, float* @vf32, align 8
ret float %1
@@ -105,8 +105,8 @@ define i64 @loadi64com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi64 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi64 at hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi64 at hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i64, i64* @vi64, align 8
ret i64 %1
@@ -118,8 +118,8 @@ define i32 @loadi32com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi32 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi32 at hi(%s0)
-; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi32 at hi(, %s0)
+; CHECK-NEXT: ldl.sx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i32, i32* @vi32, align 8
ret i32 %1
@@ -131,8 +131,8 @@ define i16 @loadi16com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi16 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi16 at hi(%s0)
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi16 at hi(, %s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i16, i16* @vi16, align 8
ret i16 %1
@@ -144,8 +144,8 @@ define i8 @loadi8com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi8 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi8 at hi(%s0)
-; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi8 at hi(, %s0)
+; CHECK-NEXT: ld1b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i8, i8* @vi8, align 8
ret i8 %1
diff --git a/llvm/test/CodeGen/VE/load.ll b/llvm/test/CodeGen/VE/load.ll
index 96e8762cee19..a9e08299b621 100644
--- a/llvm/test/CodeGen/VE/load.ll
+++ b/llvm/test/CodeGen/VE/load.ll
@@ -4,7 +4,7 @@
define double @loadf64(double* nocapture readonly %0) {
; CHECK-LABEL: loadf64:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load double, double* %0, align 16
ret double %2
@@ -14,7 +14,7 @@ define double @loadf64(double* nocapture readonly %0) {
define float @loadf32(float* nocapture readonly %0) {
; CHECK-LABEL: loadf32:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: ldu %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load float, float* %0, align 16
ret float %2
@@ -24,7 +24,7 @@ define float @loadf32(float* nocapture readonly %0) {
define i64 @loadi64(i64* nocapture readonly %0) {
; CHECK-LABEL: loadi64:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i64, i64* %0, align 16
ret i64 %2
@@ -34,7 +34,7 @@ define i64 @loadi64(i64* nocapture readonly %0) {
define i32 @loadi32(i32* nocapture readonly %0) {
; CHECK-LABEL: loadi32:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: ldl.sx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i32, i32* %0, align 16
ret i32 %2
@@ -44,7 +44,7 @@ define i32 @loadi32(i32* nocapture readonly %0) {
define i64 @loadi32sext(i32* nocapture readonly %0) {
; CHECK-LABEL: loadi32sext:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: ldl.sx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i32, i32* %0, align 16
%3 = sext i32 %2 to i64
@@ -55,7 +55,7 @@ define i64 @loadi32sext(i32* nocapture readonly %0) {
define i64 @loadi32zext(i32* nocapture readonly %0) {
; CHECK-LABEL: loadi32zext:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.zx %s0, (,%s0)
+; CHECK-NEXT: ldl.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i32, i32* %0, align 16
%3 = zext i32 %2 to i64
@@ -66,7 +66,7 @@ define i64 @loadi32zext(i32* nocapture readonly %0) {
define i16 @loadi16(i16* nocapture readonly %0) {
; CHECK-LABEL: loadi16:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i16, i16* %0, align 16
ret i16 %2
@@ -76,7 +76,7 @@ define i16 @loadi16(i16* nocapture readonly %0) {
define i64 @loadi16sext(i16* nocapture readonly %0) {
; CHECK-LABEL: loadi16sext:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.sx %s0, (,%s0)
+; CHECK-NEXT: ld2b.sx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i16, i16* %0, align 16
%3 = sext i16 %2 to i64
@@ -87,7 +87,7 @@ define i64 @loadi16sext(i16* nocapture readonly %0) {
define i64 @loadi16zext(i16* nocapture readonly %0) {
; CHECK-LABEL: loadi16zext:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i16, i16* %0, align 16
%3 = zext i16 %2 to i64
@@ -98,7 +98,7 @@ define i64 @loadi16zext(i16* nocapture readonly %0) {
define i8 @loadi8(i8* nocapture readonly %0) {
; CHECK-LABEL: loadi8:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: ld1b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i8, i8* %0, align 16
ret i8 %2
@@ -108,7 +108,7 @@ define i8 @loadi8(i8* nocapture readonly %0) {
define i64 @loadi8sext(i8* nocapture readonly %0) {
; CHECK-LABEL: loadi8sext:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.sx %s0, (,%s0)
+; CHECK-NEXT: ld1b.sx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i8, i8* %0, align 16
%3 = sext i8 %2 to i64
@@ -119,7 +119,7 @@ define i64 @loadi8sext(i8* nocapture readonly %0) {
define i64 @loadi8zext(i8* nocapture readonly %0) {
; CHECK-LABEL: loadi8zext:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: ld1b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%2 = load i8, i8* %0, align 16
%3 = zext i8 %2 to i64
@@ -130,7 +130,7 @@ define i64 @loadi8zext(i8* nocapture readonly %0) {
define double @loadf64stk() {
; CHECK-LABEL: loadf64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 176(,%s11)
+; CHECK-NEXT: ld %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 16
%1 = load double, double* %addr, align 16
@@ -141,7 +141,7 @@ define double @loadf64stk() {
define float @loadf32stk() {
; CHECK-LABEL: loadf32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldu %s0, 176(,%s11)
+; CHECK-NEXT: ldu %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 16
%1 = load float, float* %addr, align 16
@@ -152,7 +152,7 @@ define float @loadf32stk() {
define i64 @loadi64stk() {
; CHECK-LABEL: loadi64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld %s0, 176(,%s11)
+; CHECK-NEXT: ld %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 16
%1 = load i64, i64* %addr, align 16
@@ -163,7 +163,7 @@ define i64 @loadi64stk() {
define i32 @loadi32stk() {
; CHECK-LABEL: loadi32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 176(,%s11)
+; CHECK-NEXT: ldl.sx %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 16
%1 = load i32, i32* %addr, align 16
@@ -174,7 +174,7 @@ define i32 @loadi32stk() {
define i16 @loadi16stk() {
; CHECK-LABEL: loadi16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 176(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 16
%1 = load i16, i16* %addr, align 16
@@ -185,7 +185,7 @@ define i16 @loadi16stk() {
define i8 @loadi8stk() {
; CHECK-LABEL: loadi8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 176(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 16
%1 = load i8, i8* %addr, align 16
diff --git a/llvm/test/CodeGen/VE/load_gv.ll b/llvm/test/CodeGen/VE/load_gv.ll
index b90dd86e7ad9..6f5b3e19a824 100644
--- a/llvm/test/CodeGen/VE/load_gv.ll
+++ b/llvm/test/CodeGen/VE/load_gv.ll
@@ -13,8 +13,8 @@ define double @loadf64com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf64 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf64 at hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf64 at hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load double, double* @vf64, align 8
ret double %1
@@ -26,8 +26,8 @@ define float @loadf32com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vf32 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vf32 at hi(%s0)
-; CHECK-NEXT: ldu %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vf32 at hi(, %s0)
+; CHECK-NEXT: ldu %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load float, float* @vf32, align 4
ret float %1
@@ -39,8 +39,8 @@ define i64 @loadi64com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi64 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi64 at hi(%s0)
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi64 at hi(, %s0)
+; CHECK-NEXT: ld %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i64, i64* @vi64, align 8
ret i64 %1
@@ -52,8 +52,8 @@ define i32 @loadi32com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi32 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi32 at hi(%s0)
-; CHECK-NEXT: ldl.sx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi32 at hi(, %s0)
+; CHECK-NEXT: ldl.sx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i32, i32* @vi32, align 4
ret i32 %1
@@ -65,8 +65,8 @@ define i16 @loadi16com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi16 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi16 at hi(%s0)
-; CHECK-NEXT: ld2b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi16 at hi(, %s0)
+; CHECK-NEXT: ld2b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i16, i16* @vi16, align 2
ret i16 %1
@@ -78,8 +78,8 @@ define i8 @loadi8com() {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s0, vi8 at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, vi8 at hi(%s0)
-; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, vi8 at hi(, %s0)
+; CHECK-NEXT: ld1b.zx %s0, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i8, i8* @vi8, align 1
ret i8 %1
diff --git a/llvm/test/CodeGen/VE/pic_access_data.ll b/llvm/test/CodeGen/VE/pic_access_data.ll
index 0cfabe804880..9b9bdadc2f9b 100644
--- a/llvm/test/CodeGen/VE/pic_access_data.ll
+++ b/llvm/test/CodeGen/VE/pic_access_data.ll
@@ -13,23 +13,20 @@ define i32 @func() {
; CHECK-NEXT: lea.sl %s15, _GLOBAL_OFFSET_TABLE_ at pc_hi(%s16, %s15)
; CHECK-NEXT: lea %s0, dst at got_lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, dst at got_hi(%s0)
-; CHECK-NEXT: adds.l %s0, %s15, %s0
-; CHECK-NEXT: ld %s1, (,%s0)
+; CHECK-NEXT: lea.sl %s0, dst at got_hi(, %s0)
+; CHECK-NEXT: ld %s1, (%s0, %s15)
; CHECK-NEXT: lea %s0, ptr at got_lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, ptr at got_hi(%s0)
; CHECK-NEXT: lea %s2, src at got_lo
; CHECK-NEXT: and %s2, %s2, (32)0
-; CHECK-NEXT: lea.sl %s2, src at got_hi(%s2)
-; CHECK-NEXT: adds.l %s2, %s15, %s2
-; CHECK-NEXT: ld %s2, (,%s2)
-; CHECK-NEXT: adds.l %s0, %s15, %s0
-; CHECK-NEXT: ld %s0, (,%s0)
-; CHECK-NEXT: ldl.sx %s2, (,%s2)
-; CHECK-NEXT: st %s1, (,%s0)
+; CHECK-NEXT: lea.sl %s2, src at got_hi(, %s2)
+; CHECK-NEXT: ld %s2, (%s2, %s15)
+; CHECK-NEXT: lea.sl %s0, ptr at got_hi(, %s0)
+; CHECK-NEXT: ld %s0, (%s0, %s15)
+; CHECK-NEXT: ldl.sx %s2, (, %s2)
+; CHECK-NEXT: st %s1, (, %s0)
; CHECK-NEXT: or %s0, 1, (0)1
-; CHECK-NEXT: stl %s2, (,%s1)
+; CHECK-NEXT: stl %s2, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i32* @dst, i32** @ptr, align 8
diff --git a/llvm/test/CodeGen/VE/pic_access_static_data.ll b/llvm/test/CodeGen/VE/pic_access_static_data.ll
index b95ae66a6f78..b8fcd42d4ac2 100644
--- a/llvm/test/CodeGen/VE/pic_access_static_data.ll
+++ b/llvm/test/CodeGen/VE/pic_access_static_data.ll
@@ -13,17 +13,15 @@ define void @func() {
; CHECK-NEXT: lea.sl %s15, _GLOBAL_OFFSET_TABLE_ at pc_hi(%s16, %s15)
; CHECK-NEXT: lea %s0, src at gotoff_lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, src at gotoff_hi(%s0)
-; CHECK-NEXT: adds.l %s0, %s15, %s0
-; CHECK-NEXT: ld1b.zx %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, src at gotoff_hi(, %s0)
+; CHECK-NEXT: ld1b.zx %s0, (%s0, %s15)
; CHECK-NEXT: or %s1, 0, (0)1
; CHECK-NEXT: lea %s2, 100
; CHECK-NEXT: cmov.w.ne %s1, %s2, %s0
; CHECK-NEXT: lea %s0, dst at gotoff_lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, dst at gotoff_hi(%s0)
-; CHECK-NEXT: adds.l %s0, %s15, %s0
-; CHECK-NEXT: stl %s1, (,%s0)
+; CHECK-NEXT: lea.sl %s0, dst at gotoff_hi(, %s0)
+; CHECK-NEXT: stl %s1, (%s0, %s15)
; CHECK-NEXT: or %s11, 0, %s9
%1 = load i1, i1* @src, align 4
@@ -42,10 +40,9 @@ define i32 @main() {
; CHECK-NEXT: lea.sl %s15, _GLOBAL_OFFSET_TABLE_ at pc_hi(%s16, %s15)
; CHECK-NEXT: lea %s0, src at gotoff_lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, src at gotoff_hi(%s0)
-; CHECK-NEXT: adds.l %s0, %s15, %s0
+; CHECK-NEXT: lea.sl %s0, src at gotoff_hi(, %s0)
; CHECK-NEXT: or %s1, 1, (0)1
-; CHECK-NEXT: st1b %s1, (,%s0)
+; CHECK-NEXT: st1b %s1, (%s0, %s15)
; CHECK-NEXT: lea %s12, func at plt_lo(-24)
; CHECK-NEXT: and %s12, %s12, (32)0
; CHECK-NEXT: sic %s16
@@ -53,19 +50,18 @@ define i32 @main() {
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: lea %s0, dst at gotoff_lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, dst at gotoff_hi(%s0)
-; CHECK-NEXT: adds.l %s0, %s15, %s0
-; CHECK-NEXT: ldl.sx %s1, (,%s0)
-; CHECK-NEXT: stl %s1, 184(,%s11)
+; CHECK-NEXT: lea.sl %s0, dst at gotoff_hi(, %s0)
+; CHECK-NEXT: ldl.sx %s1, (%s0, %s15)
+; CHECK-NEXT: stl %s1, 184(, %s11)
; CHECK-NEXT: lea %s0, .L.str at gotoff_lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, .L.str at gotoff_hi(%s0)
+; CHECK-NEXT: lea.sl %s0, .L.str at gotoff_hi(, %s0)
; CHECK-NEXT: adds.l %s0, %s15, %s0
; CHECK-NEXT: lea %s12, printf at plt_lo(-24)
; CHECK-NEXT: and %s12, %s12, (32)0
; CHECK-NEXT: sic %s16
; CHECK-NEXT: lea.sl %s12, printf at plt_hi(%s16, %s12)
-; CHECK-NEXT: st %s0, 176(,%s11)
+; CHECK-NEXT: st %s0, 176(, %s11)
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: or %s0, 0, (0)1
; CHECK-NEXT: or %s11, 0, %s9
diff --git a/llvm/test/CodeGen/VE/pic_indirect_func_call.ll b/llvm/test/CodeGen/VE/pic_indirect_func_call.ll
index 17069667029e..f1f3a6ea0acb 100644
--- a/llvm/test/CodeGen/VE/pic_indirect_func_call.ll
+++ b/llvm/test/CodeGen/VE/pic_indirect_func_call.ll
@@ -11,15 +11,13 @@ define void @func() {
; CHECK-NEXT: lea.sl %s15, _GLOBAL_OFFSET_TABLE_ at pc_hi(%s16, %s15)
; CHECK-NEXT: lea %s0, function at got_lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s0, function at got_hi(%s0)
-; CHECK-NEXT: adds.l %s0, %s15, %s0
-; CHECK-NEXT: ld %s0, (,%s0)
+; CHECK-NEXT: lea.sl %s0, function at got_hi(, %s0)
+; CHECK-NEXT: ld %s0, (%s0, %s15)
; CHECK-NEXT: lea %s1, ptr at got_lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, ptr at got_hi(%s1)
-; CHECK-NEXT: adds.l %s1, %s15, %s1
-; CHECK-NEXT: ld %s1, (,%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, ptr at got_hi(, %s1)
+; CHECK-NEXT: ld %s1, (%s1, %s15)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s12, 0, %s0
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: or %s11, 0, %s9
diff --git a/llvm/test/CodeGen/VE/sext_zext_load.ll b/llvm/test/CodeGen/VE/sext_zext_load.ll
index aafa2ba94194..b9fc6bc4daf7 100644
--- a/llvm/test/CodeGen/VE/sext_zext_load.ll
+++ b/llvm/test/CodeGen/VE/sext_zext_load.ll
@@ -3,7 +3,7 @@
define signext i16 @func1() {
; CHECK-LABEL: func1:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.sx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.sx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
@@ -14,7 +14,7 @@ define signext i16 @func1() {
define i32 @func2() {
; CHECK-LABEL: func2:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.sx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.sx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
@@ -25,7 +25,7 @@ define i32 @func2() {
define i64 @func3() {
; CHECK-LABEL: func3:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.sx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.sx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
@@ -36,7 +36,7 @@ define i64 @func3() {
define zeroext i16 @func5() {
; CHECK-LABEL: func5:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.sx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.sx %s0, 191(, %s11)
; CHECK-NEXT: and %s0, %s0, (48)0
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
@@ -48,7 +48,7 @@ define zeroext i16 @func5() {
define i32 @func6() {
; CHECK-LABEL: func6:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.sx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.sx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
@@ -59,7 +59,7 @@ define i32 @func6() {
define i64 @func7() {
; CHECK-LABEL: func7:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.sx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.sx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
@@ -70,7 +70,7 @@ define i64 @func7() {
define signext i16 @func9() {
; CHECK-LABEL: func9:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
@@ -81,7 +81,7 @@ define signext i16 @func9() {
define i32 @func10() {
; CHECK-LABEL: func10:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
@@ -92,7 +92,7 @@ define i32 @func10() {
define i64 @func11() {
; CHECK-LABEL: func11:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
@@ -103,7 +103,7 @@ define i64 @func11() {
define zeroext i16 @func13() {
; CHECK-LABEL: func13:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
@@ -114,7 +114,7 @@ define zeroext i16 @func13() {
define zeroext i16 @func14() {
; CHECK-LABEL: func14:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
@@ -125,7 +125,7 @@ define zeroext i16 @func14() {
define i64 @func15() {
; CHECK-LABEL: func15:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i8, align 1
%a.val = load i8, i8* %a, align 1
@@ -136,7 +136,7 @@ define i64 @func15() {
define i32 @func17() {
; CHECK-LABEL: func17:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.sx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.sx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i16, align 2
%a.val = load i16, i16* %a, align 2
@@ -147,7 +147,7 @@ define i32 @func17() {
define i64 @func18() {
; CHECK-LABEL: func18:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.sx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.sx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i16, align 2
%a.val = load i16, i16* %a, align 2
@@ -158,7 +158,7 @@ define i64 @func18() {
define zeroext i16 @func20() {
; CHECK-LABEL: func20:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i16, align 2
%a.conv = load i16, i16* %a, align 2
@@ -168,7 +168,7 @@ define zeroext i16 @func20() {
define i64 @func21() {
; CHECK-LABEL: func21:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.sx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.sx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i16, align 2
%a.val = load i16, i16* %a, align 2
@@ -179,7 +179,7 @@ define i64 @func21() {
define i32 @func23() {
; CHECK-LABEL: func23:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i16, align 2
%a.val = load i16, i16* %a, align 2
@@ -190,7 +190,7 @@ define i32 @func23() {
define i64 @func24() {
; CHECK-LABEL: func24:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i16, align 2
%a.val = load i16, i16* %a, align 2
@@ -201,7 +201,7 @@ define i64 @func24() {
define zeroext i16 @func26() {
; CHECK-LABEL: func26:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i16, align 2
%a.conv = load i16, i16* %a, align 2
@@ -211,7 +211,7 @@ define zeroext i16 @func26() {
define i64 @func27() {
; CHECK-LABEL: func27:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld2b.zx %s0, 190(,%s11)
+; CHECK-NEXT: ld2b.zx %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i16, align 2
%a.val = load i16, i16* %a, align 2
@@ -222,7 +222,7 @@ define i64 @func27() {
define i64 @func29() {
; CHECK-LABEL: func29:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 188(,%s11)
+; CHECK-NEXT: ldl.sx %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i32, align 4
%a.val = load i32, i32* %a, align 4
@@ -233,7 +233,7 @@ define i64 @func29() {
define i64 @func31() {
; CHECK-LABEL: func31:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.sx %s0, 188(,%s11)
+; CHECK-NEXT: ldl.sx %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i32, align 4
%a.val = load i32, i32* %a, align 4
@@ -244,7 +244,7 @@ define i64 @func31() {
define i64 @func33() {
; CHECK-LABEL: func33:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.zx %s0, 188(,%s11)
+; CHECK-NEXT: ldl.zx %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i32, align 4
%a.val = load i32, i32* %a, align 4
@@ -255,7 +255,7 @@ define i64 @func33() {
define i64 @func35() {
; CHECK-LABEL: func35:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ldl.zx %s0, 188(,%s11)
+; CHECK-NEXT: ldl.zx %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i32, align 4
%a.val = load i32, i32* %a, align 4
@@ -266,7 +266,7 @@ define i64 @func35() {
define signext i8 @func37() {
; CHECK-LABEL: func37:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: sla.w.sx %s0, %s0, 31
; CHECK-NEXT: sra.w.sx %s0, %s0, 31
; CHECK-NEXT: or %s11, 0, %s9
@@ -279,7 +279,7 @@ define signext i8 @func37() {
define signext i16 @func38() {
; CHECK-LABEL: func38:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: sla.w.sx %s0, %s0, 31
; CHECK-NEXT: sra.w.sx %s0, %s0, 31
; CHECK-NEXT: or %s11, 0, %s9
@@ -292,7 +292,7 @@ define signext i16 @func38() {
define signext i32 @func39() {
; CHECK-LABEL: func39:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: sla.w.sx %s0, %s0, 31
; CHECK-NEXT: sra.w.sx %s0, %s0, 31
; CHECK-NEXT: or %s11, 0, %s9
@@ -305,7 +305,7 @@ define signext i32 @func39() {
define signext i64 @func40() {
; CHECK-LABEL: func40:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: sll %s0, %s0, 63
; CHECK-NEXT: sra.l %s0, %s0, 63
; CHECK-NEXT: or %s11, 0, %s9
@@ -318,7 +318,7 @@ define signext i64 @func40() {
define signext i8 @func42() {
; CHECK-LABEL: func42:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i1, align 1
%a.val = load i1, i1* %a, align 1
@@ -329,7 +329,7 @@ define signext i8 @func42() {
define signext i16 @func43() {
; CHECK-LABEL: func43:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i1, align 1
%a.val = load i1, i1* %a, align 1
@@ -340,7 +340,7 @@ define signext i16 @func43() {
define signext i32 @func44() {
; CHECK-LABEL: func44:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i1, align 1
%a.val = load i1, i1* %a, align 1
@@ -351,7 +351,7 @@ define signext i32 @func44() {
define signext i64 @func45() {
; CHECK-LABEL: func45:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: ld1b.zx %s0, 191(,%s11)
+; CHECK-NEXT: ld1b.zx %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%a = alloca i1, align 1
%a.val = load i1, i1* %a, align 1
diff --git a/llvm/test/CodeGen/VE/simple_prologue_epilogue.ll b/llvm/test/CodeGen/VE/simple_prologue_epilogue.ll
index 9e26d0aee6b7..70f6a3d9ae13 100644
--- a/llvm/test/CodeGen/VE/simple_prologue_epilogue.ll
+++ b/llvm/test/CodeGen/VE/simple_prologue_epilogue.ll
@@ -3,17 +3,17 @@
define void @func() {
; CHECK-LABEL: func:
; CHECK: # %bb.0:
-; CHECK-NEXT: st %s9, (,%s11)
-; CHECK-NEXT: st %s10, 8(,%s11)
-; CHECK-NEXT: st %s15, 24(,%s11)
-; CHECK-NEXT: st %s16, 32(,%s11)
+; CHECK-NEXT: st %s9, (, %s11)
+; CHECK-NEXT: st %s10, 8(, %s11)
+; CHECK-NEXT: st %s15, 24(, %s11)
+; CHECK-NEXT: st %s16, 32(, %s11)
; CHECK-NEXT: or %s9, 0, %s11
; CHECK-NEXT: lea %s13, -176
; CHECK-NEXT: and %s13, %s13, (32)0
-; CHECK-NEXT: lea.sl %s11, -1(%s11, %s13)
+; CHECK-NEXT: lea.sl %s11, -1(%s13, %s11)
; CHECK-NEXT: brge.l %s11, %s8, .LBB0_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: ld %s61, 24(,%s14)
+; CHECK-NEXT: ld %s61, 24(, %s14)
; CHECK-NEXT: or %s62, 0, %s0
; CHECK-NEXT: lea %s63, 315
; CHECK-NEXT: shm.l %s63, (%s61)
@@ -23,10 +23,10 @@ define void @func() {
; CHECK-NEXT: or %s0, 0, %s62
; CHECK-NEXT: .LBB0_2:
; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(,%s11)
-; CHECK-NEXT: ld %s15, 24(,%s11)
-; CHECK-NEXT: ld %s10, 8(,%s11)
-; CHECK-NEXT: ld %s9, (,%s11)
+; CHECK-NEXT: ld %s16, 32(, %s11)
+; CHECK-NEXT: ld %s15, 24(, %s11)
+; CHECK-NEXT: ld %s10, 8(, %s11)
+; CHECK-NEXT: ld %s9, (, %s11)
; CHECK-NEXT: b.l (,%lr)
ret void
}
@@ -34,17 +34,17 @@ define void @func() {
define i64 @func1(i64) {
; CHECK-LABEL: func1:
; CHECK: # %bb.0:
-; CHECK-NEXT: st %s9, (,%s11)
-; CHECK-NEXT: st %s10, 8(,%s11)
-; CHECK-NEXT: st %s15, 24(,%s11)
-; CHECK-NEXT: st %s16, 32(,%s11)
+; CHECK-NEXT: st %s9, (, %s11)
+; CHECK-NEXT: st %s10, 8(, %s11)
+; CHECK-NEXT: st %s15, 24(, %s11)
+; CHECK-NEXT: st %s16, 32(, %s11)
; CHECK-NEXT: or %s9, 0, %s11
; CHECK-NEXT: lea %s13, -176
; CHECK-NEXT: and %s13, %s13, (32)0
-; CHECK-NEXT: lea.sl %s11, -1(%s11, %s13)
+; CHECK-NEXT: lea.sl %s11, -1(%s13, %s11)
; CHECK-NEXT: brge.l %s11, %s8, .LBB1_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: ld %s61, 24(,%s14)
+; CHECK-NEXT: ld %s61, 24(, %s14)
; CHECK-NEXT: or %s62, 0, %s0
; CHECK-NEXT: lea %s63, 315
; CHECK-NEXT: shm.l %s63, (%s61)
@@ -54,10 +54,10 @@ define i64 @func1(i64) {
; CHECK-NEXT: or %s0, 0, %s62
; CHECK-NEXT: .LBB1_2:
; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(,%s11)
-; CHECK-NEXT: ld %s15, 24(,%s11)
-; CHECK-NEXT: ld %s10, 8(,%s11)
-; CHECK-NEXT: ld %s9, (,%s11)
+; CHECK-NEXT: ld %s16, 32(, %s11)
+; CHECK-NEXT: ld %s15, 24(, %s11)
+; CHECK-NEXT: ld %s10, 8(, %s11)
+; CHECK-NEXT: ld %s9, (, %s11)
; CHECK-NEXT: b.l (,%lr)
ret i64 %0
}
@@ -65,17 +65,17 @@ define i64 @func1(i64) {
define i64 @func2(i64, i64, i64, i64, i64) {
; CHECK-LABEL: func2:
; CHECK: # %bb.0:
-; CHECK-NEXT: st %s9, (,%s11)
-; CHECK-NEXT: st %s10, 8(,%s11)
-; CHECK-NEXT: st %s15, 24(,%s11)
-; CHECK-NEXT: st %s16, 32(,%s11)
+; CHECK-NEXT: st %s9, (, %s11)
+; CHECK-NEXT: st %s10, 8(, %s11)
+; CHECK-NEXT: st %s15, 24(, %s11)
+; CHECK-NEXT: st %s16, 32(, %s11)
; CHECK-NEXT: or %s9, 0, %s11
; CHECK-NEXT: lea %s13, -176
; CHECK-NEXT: and %s13, %s13, (32)0
-; CHECK-NEXT: lea.sl %s11, -1(%s11, %s13)
+; CHECK-NEXT: lea.sl %s11, -1(%s13, %s11)
; CHECK-NEXT: brge.l %s11, %s8, .LBB2_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: ld %s61, 24(,%s14)
+; CHECK-NEXT: ld %s61, 24(, %s14)
; CHECK-NEXT: or %s62, 0, %s0
; CHECK-NEXT: lea %s63, 315
; CHECK-NEXT: shm.l %s63, (%s61)
@@ -86,10 +86,10 @@ define i64 @func2(i64, i64, i64, i64, i64) {
; CHECK-NEXT: .LBB2_2:
; CHECK-NEXT: or %s0, 0, %s4
; CHECK-NEXT: or %s11, 0, %s9
-; CHECK-NEXT: ld %s16, 32(,%s11)
-; CHECK-NEXT: ld %s15, 24(,%s11)
-; CHECK-NEXT: ld %s10, 8(,%s11)
-; CHECK-NEXT: ld %s9, (,%s11)
+; CHECK-NEXT: ld %s16, 32(, %s11)
+; CHECK-NEXT: ld %s15, 24(, %s11)
+; CHECK-NEXT: ld %s10, 8(, %s11)
+; CHECK-NEXT: ld %s9, (, %s11)
; CHECK-NEXT: b.l (,%lr)
ret i64 %4
}
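
For readers skimming these prologue/epilogue checks: besides the extra space after the comma, the only substantive change in this file is the lea.sl that extends the stack frame, whose two register operands now print in index-then-base order. The updated checks use the three operand shapes shown below (an illustrative summary of lines already present above, not new output):

  lea.sl %s11, -1(%s13, %s11)   # disp(index, base): all three operands present
  st     %s10, 8(, %s11)        # no index: the slot before the comma stays empty
  st     %s9, (, %s11)          # zero displacement: the disp is omitted entirely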
diff --git a/llvm/test/CodeGen/VE/store-align1.ll b/llvm/test/CodeGen/VE/store-align1.ll
index 8ed2734f9889..85b508295134 100644
--- a/llvm/test/CodeGen/VE/store-align1.ll
+++ b/llvm/test/CodeGen/VE/store-align1.ll
@@ -11,7 +11,7 @@
define void @storef64stk(double %0) {
; CHECK-LABEL: storef64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: st %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 1
store double %0, double* %addr, align 1
@@ -22,7 +22,7 @@ define void @storef64stk(double %0) {
define void @storef32stk(float %0) {
; CHECK-LABEL: storef32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s0, 188(,%s11)
+; CHECK-NEXT: stu %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 1
store float %0, float* %addr, align 1
@@ -33,7 +33,7 @@ define void @storef32stk(float %0) {
define void @storei64stk(i64 %0) {
; CHECK-LABEL: storei64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: st %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 1
store i64 %0, i64* %addr, align 1
@@ -44,7 +44,7 @@ define void @storei64stk(i64 %0) {
define void @storei32stk(i32 %0) {
; CHECK-LABEL: storei32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, 188(,%s11)
+; CHECK-NEXT: stl %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 1
store i32 %0, i32* %addr, align 1
@@ -55,7 +55,7 @@ define void @storei32stk(i32 %0) {
define void @storei16stk(i16 %0) {
; CHECK-LABEL: storei16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s0, 190(,%s11)
+; CHECK-NEXT: st2b %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 1
store i16 %0, i16* %addr, align 1
@@ -66,7 +66,7 @@ define void @storei16stk(i16 %0) {
define void @storei8stk(i8 %0) {
; CHECK-LABEL: storei8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s0, 191(,%s11)
+; CHECK-NEXT: st1b %s0, 191(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 1
store i8 %0, i8* %addr, align 1
@@ -79,8 +79,8 @@ define void @storef64com(double %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf64 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf64 at hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf64 at hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store double %0, double* @vf64, align 1
ret void
@@ -92,8 +92,8 @@ define void @storef32com(float %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf32 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf32 at hi(%s1)
-; CHECK-NEXT: stu %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf32 at hi(, %s1)
+; CHECK-NEXT: stu %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store float %0, float* @vf32, align 1
ret void
@@ -105,8 +105,8 @@ define void @storei64com(i64 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi64 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi64 at hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi64 at hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i64 %0, i64* @vi64, align 1
ret void
@@ -118,8 +118,8 @@ define void @storei32com(i32 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi32 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi32 at hi(%s1)
-; CHECK-NEXT: stl %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi32 at hi(, %s1)
+; CHECK-NEXT: stl %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i32 %0, i32* @vi32, align 1
ret void
@@ -131,8 +131,8 @@ define void @storei16com(i16 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi16 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi16 at hi(%s1)
-; CHECK-NEXT: st2b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi16 at hi(, %s1)
+; CHECK-NEXT: st2b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i16 %0, i16* @vi16, align 1
ret void
@@ -144,8 +144,8 @@ define void @storei8com(i8 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi8 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi8 at hi(%s1)
-; CHECK-NEXT: st1b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi8 at hi(, %s1)
+; CHECK-NEXT: st1b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i8 %0, i8* @vi8, align 1
ret void
diff --git a/llvm/test/CodeGen/VE/store-align2.ll b/llvm/test/CodeGen/VE/store-align2.ll
index cfb5de30d938..fc21e8478c69 100644
--- a/llvm/test/CodeGen/VE/store-align2.ll
+++ b/llvm/test/CodeGen/VE/store-align2.ll
@@ -11,7 +11,7 @@
define void @storef64stk(double %0) {
; CHECK-LABEL: storef64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: st %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 2
store double %0, double* %addr, align 2
@@ -22,7 +22,7 @@ define void @storef64stk(double %0) {
define void @storef32stk(float %0) {
; CHECK-LABEL: storef32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s0, 188(,%s11)
+; CHECK-NEXT: stu %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 2
store float %0, float* %addr, align 2
@@ -33,7 +33,7 @@ define void @storef32stk(float %0) {
define void @storei64stk(i64 %0) {
; CHECK-LABEL: storei64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: st %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 2
store i64 %0, i64* %addr, align 2
@@ -44,7 +44,7 @@ define void @storei64stk(i64 %0) {
define void @storei32stk(i32 %0) {
; CHECK-LABEL: storei32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, 188(,%s11)
+; CHECK-NEXT: stl %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 2
store i32 %0, i32* %addr, align 2
@@ -55,7 +55,7 @@ define void @storei32stk(i32 %0) {
define void @storei16stk(i16 %0) {
; CHECK-LABEL: storei16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s0, 190(,%s11)
+; CHECK-NEXT: st2b %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 2
store i16 %0, i16* %addr, align 2
@@ -66,7 +66,7 @@ define void @storei16stk(i16 %0) {
define void @storei8stk(i8 %0) {
; CHECK-LABEL: storei8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s0, 190(,%s11)
+; CHECK-NEXT: st1b %s0, 190(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 2
store i8 %0, i8* %addr, align 2
@@ -79,8 +79,8 @@ define void @storef64com(double %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf64 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf64 at hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf64 at hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store double %0, double* @vf64, align 2
ret void
@@ -92,8 +92,8 @@ define void @storef32com(float %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf32 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf32 at hi(%s1)
-; CHECK-NEXT: stu %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf32 at hi(, %s1)
+; CHECK-NEXT: stu %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store float %0, float* @vf32, align 2
ret void
@@ -105,8 +105,8 @@ define void @storei64com(i64 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi64 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi64 at hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi64 at hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i64 %0, i64* @vi64, align 2
ret void
@@ -118,8 +118,8 @@ define void @storei32com(i32 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi32 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi32 at hi(%s1)
-; CHECK-NEXT: stl %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi32 at hi(, %s1)
+; CHECK-NEXT: stl %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i32 %0, i32* @vi32, align 2
ret void
@@ -131,8 +131,8 @@ define void @storei16com(i16 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi16 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi16 at hi(%s1)
-; CHECK-NEXT: st2b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi16 at hi(, %s1)
+; CHECK-NEXT: st2b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i16 %0, i16* @vi16, align 2
ret void
@@ -144,8 +144,8 @@ define void @storei8com(i8 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi8 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi8 at hi(%s1)
-; CHECK-NEXT: st1b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi8 at hi(, %s1)
+; CHECK-NEXT: st1b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i8 %0, i8* @vi8, align 2
ret void
diff --git a/llvm/test/CodeGen/VE/store-align4.ll b/llvm/test/CodeGen/VE/store-align4.ll
index 195f765ea5eb..8ab4d40aa95b 100644
--- a/llvm/test/CodeGen/VE/store-align4.ll
+++ b/llvm/test/CodeGen/VE/store-align4.ll
@@ -11,7 +11,7 @@
define void @storef64stk(double %0) {
; CHECK-LABEL: storef64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: st %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 4
store double %0, double* %addr, align 4
@@ -22,7 +22,7 @@ define void @storef64stk(double %0) {
define void @storef32stk(float %0) {
; CHECK-LABEL: storef32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s0, 188(,%s11)
+; CHECK-NEXT: stu %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 4
store float %0, float* %addr, align 4
@@ -33,7 +33,7 @@ define void @storef32stk(float %0) {
define void @storei64stk(i64 %0) {
; CHECK-LABEL: storei64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: st %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 4
store i64 %0, i64* %addr, align 4
@@ -44,7 +44,7 @@ define void @storei64stk(i64 %0) {
define void @storei32stk(i32 %0) {
; CHECK-LABEL: storei32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, 188(,%s11)
+; CHECK-NEXT: stl %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 4
store i32 %0, i32* %addr, align 4
@@ -55,7 +55,7 @@ define void @storei32stk(i32 %0) {
define void @storei16stk(i16 %0) {
; CHECK-LABEL: storei16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s0, 188(,%s11)
+; CHECK-NEXT: st2b %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 4
store i16 %0, i16* %addr, align 4
@@ -66,7 +66,7 @@ define void @storei16stk(i16 %0) {
define void @storei8stk(i8 %0) {
; CHECK-LABEL: storei8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s0, 188(,%s11)
+; CHECK-NEXT: st1b %s0, 188(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 4
store i8 %0, i8* %addr, align 4
@@ -79,8 +79,8 @@ define void @storef64com(double %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf64 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf64 at hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf64 at hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store double %0, double* @vf64, align 4
ret void
@@ -92,8 +92,8 @@ define void @storef32com(float %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf32 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf32 at hi(%s1)
-; CHECK-NEXT: stu %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf32 at hi(, %s1)
+; CHECK-NEXT: stu %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store float %0, float* @vf32, align 4
ret void
@@ -105,8 +105,8 @@ define void @storei64com(i64 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi64 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi64 at hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi64 at hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i64 %0, i64* @vi64, align 4
ret void
@@ -118,8 +118,8 @@ define void @storei32com(i32 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi32 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi32 at hi(%s1)
-; CHECK-NEXT: stl %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi32 at hi(, %s1)
+; CHECK-NEXT: stl %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i32 %0, i32* @vi32, align 4
ret void
@@ -131,8 +131,8 @@ define void @storei16com(i16 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi16 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi16 at hi(%s1)
-; CHECK-NEXT: st2b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi16 at hi(, %s1)
+; CHECK-NEXT: st2b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i16 %0, i16* @vi16, align 4
ret void
@@ -144,8 +144,8 @@ define void @storei8com(i8 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi8 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi8 at hi(%s1)
-; CHECK-NEXT: st1b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi8 at hi(, %s1)
+; CHECK-NEXT: st1b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i8 %0, i8* @vi8, align 4
ret void
diff --git a/llvm/test/CodeGen/VE/store-align8.ll b/llvm/test/CodeGen/VE/store-align8.ll
index 330ae073f7f3..3da6562979b5 100644
--- a/llvm/test/CodeGen/VE/store-align8.ll
+++ b/llvm/test/CodeGen/VE/store-align8.ll
@@ -11,7 +11,7 @@
define void @storef64stk(double %0) {
; CHECK-LABEL: storef64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: st %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 8
store double %0, double* %addr, align 8
@@ -22,7 +22,7 @@ define void @storef64stk(double %0) {
define void @storef32stk(float %0) {
; CHECK-LABEL: storef32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s0, 184(,%s11)
+; CHECK-NEXT: stu %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 8
store float %0, float* %addr, align 8
@@ -33,7 +33,7 @@ define void @storef32stk(float %0) {
define void @storei64stk(i64 %0) {
; CHECK-LABEL: storei64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 184(,%s11)
+; CHECK-NEXT: st %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 8
store i64 %0, i64* %addr, align 8
@@ -44,7 +44,7 @@ define void @storei64stk(i64 %0) {
define void @storei32stk(i32 %0) {
; CHECK-LABEL: storei32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, 184(,%s11)
+; CHECK-NEXT: stl %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 8
store i32 %0, i32* %addr, align 8
@@ -55,7 +55,7 @@ define void @storei32stk(i32 %0) {
define void @storei16stk(i16 %0) {
; CHECK-LABEL: storei16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s0, 184(,%s11)
+; CHECK-NEXT: st2b %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 8
store i16 %0, i16* %addr, align 8
@@ -66,7 +66,7 @@ define void @storei16stk(i16 %0) {
define void @storei8stk(i8 %0) {
; CHECK-LABEL: storei8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s0, 184(,%s11)
+; CHECK-NEXT: st1b %s0, 184(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 8
store i8 %0, i8* %addr, align 8
@@ -79,8 +79,8 @@ define void @storef64com(double %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf64 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf64 at hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf64 at hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store double %0, double* @vf64, align 8
ret void
@@ -92,8 +92,8 @@ define void @storef32com(float %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf32 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf32 at hi(%s1)
-; CHECK-NEXT: stu %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf32 at hi(, %s1)
+; CHECK-NEXT: stu %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store float %0, float* @vf32, align 8
ret void
@@ -105,8 +105,8 @@ define void @storei64com(i64 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi64 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi64 at hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi64 at hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i64 %0, i64* @vi64, align 8
ret void
@@ -118,8 +118,8 @@ define void @storei32com(i32 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi32 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi32 at hi(%s1)
-; CHECK-NEXT: stl %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi32 at hi(, %s1)
+; CHECK-NEXT: stl %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i32 %0, i32* @vi32, align 8
ret void
@@ -131,8 +131,8 @@ define void @storei16com(i16 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi16 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi16 at hi(%s1)
-; CHECK-NEXT: st2b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi16 at hi(, %s1)
+; CHECK-NEXT: st2b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i16 %0, i16* @vi16, align 8
ret void
@@ -144,8 +144,8 @@ define void @storei8com(i8 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi8 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi8 at hi(%s1)
-; CHECK-NEXT: st1b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi8 at hi(, %s1)
+; CHECK-NEXT: st1b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i8 %0, i8* @vi8, align 8
ret void
diff --git a/llvm/test/CodeGen/VE/store.ll b/llvm/test/CodeGen/VE/store.ll
index 984d2cb4df92..b3b5d26087e7 100644
--- a/llvm/test/CodeGen/VE/store.ll
+++ b/llvm/test/CodeGen/VE/store.ll
@@ -4,7 +4,7 @@
define void @storef64(double* nocapture %0, double %1) {
; CHECK-LABEL: storef64:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s1, (,%s0)
+; CHECK-NEXT: st %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
store double %1, double* %0, align 16
ret void
@@ -14,7 +14,7 @@ define void @storef64(double* nocapture %0, double %1) {
define void @storef32(float* nocapture %0, float %1) {
; CHECK-LABEL: storef32:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s1, (,%s0)
+; CHECK-NEXT: stu %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
store float %1, float* %0, align 16
ret void
@@ -24,7 +24,7 @@ define void @storef32(float* nocapture %0, float %1) {
define void @storei64(i64* nocapture %0, i64 %1) {
; CHECK-LABEL: storei64:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s1, (,%s0)
+; CHECK-NEXT: st %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
store i64 %1, i64* %0, align 16
ret void
@@ -34,7 +34,7 @@ define void @storei64(i64* nocapture %0, i64 %1) {
define void @storei32(i32* nocapture %0, i32 %1) {
; CHECK-LABEL: storei32:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s1, (,%s0)
+; CHECK-NEXT: stl %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
store i32 %1, i32* %0, align 16
ret void
@@ -44,7 +44,7 @@ define void @storei32(i32* nocapture %0, i32 %1) {
define void @storei32tr(i32* nocapture %0, i64 %1) {
; CHECK-LABEL: storei32tr:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s1, (,%s0)
+; CHECK-NEXT: stl %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%3 = trunc i64 %1 to i32
store i32 %3, i32* %0, align 16
@@ -55,7 +55,7 @@ define void @storei32tr(i32* nocapture %0, i64 %1) {
define void @storei16(i16* nocapture %0, i16 %1) {
; CHECK-LABEL: storei16:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s1, (,%s0)
+; CHECK-NEXT: st2b %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
store i16 %1, i16* %0, align 16
ret void
@@ -65,7 +65,7 @@ define void @storei16(i16* nocapture %0, i16 %1) {
define void @storei16tr(i16* nocapture %0, i64 %1) {
; CHECK-LABEL: storei16tr:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s1, (,%s0)
+; CHECK-NEXT: st2b %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%3 = trunc i64 %1 to i16
store i16 %3, i16* %0, align 16
@@ -76,7 +76,7 @@ define void @storei16tr(i16* nocapture %0, i64 %1) {
define void @storei8(i8* nocapture %0, i8 %1) {
; CHECK-LABEL: storei8:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s1, (,%s0)
+; CHECK-NEXT: st1b %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
store i8 %1, i8* %0, align 16
ret void
@@ -86,7 +86,7 @@ define void @storei8(i8* nocapture %0, i8 %1) {
define void @storei8tr(i8* nocapture %0, i64 %1) {
; CHECK-LABEL: storei8tr:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s1, (,%s0)
+; CHECK-NEXT: st1b %s1, (, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%3 = trunc i64 %1 to i8
store i8 %3, i8* %0, align 16
@@ -97,7 +97,7 @@ define void @storei8tr(i8* nocapture %0, i64 %1) {
define void @storef64stk(double %0) {
; CHECK-LABEL: storef64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 176(,%s11)
+; CHECK-NEXT: st %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca double, align 16
store double %0, double* %addr, align 16
@@ -108,7 +108,7 @@ define void @storef64stk(double %0) {
define void @storef32stk(float %0) {
; CHECK-LABEL: storef32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stu %s0, 176(,%s11)
+; CHECK-NEXT: stu %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca float, align 16
store float %0, float* %addr, align 16
@@ -119,7 +119,7 @@ define void @storef32stk(float %0) {
define void @storei64stk(i64 %0) {
; CHECK-LABEL: storei64stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s0, 176(,%s11)
+; CHECK-NEXT: st %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i64, align 16
store i64 %0, i64* %addr, align 16
@@ -130,7 +130,7 @@ define void @storei64stk(i64 %0) {
define void @storei32stk(i32 %0) {
; CHECK-LABEL: storei32stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, 176(,%s11)
+; CHECK-NEXT: stl %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i32, align 16
store i32 %0, i32* %addr, align 16
@@ -141,7 +141,7 @@ define void @storei32stk(i32 %0) {
define void @storei16stk(i16 %0) {
; CHECK-LABEL: storei16stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s0, 176(,%s11)
+; CHECK-NEXT: st2b %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i16, align 16
store i16 %0, i16* %addr, align 16
@@ -152,7 +152,7 @@ define void @storei16stk(i16 %0) {
define void @storei8stk(i8 %0) {
; CHECK-LABEL: storei8stk:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s0, 176(,%s11)
+; CHECK-NEXT: st1b %s0, 176(, %s11)
; CHECK-NEXT: or %s11, 0, %s9
%addr = alloca i8, align 16
store i8 %0, i8* %addr, align 16
diff --git a/llvm/test/CodeGen/VE/store_gv.ll b/llvm/test/CodeGen/VE/store_gv.ll
index 033b44c02099..4405e9e8cab1 100644
--- a/llvm/test/CodeGen/VE/store_gv.ll
+++ b/llvm/test/CodeGen/VE/store_gv.ll
@@ -13,8 +13,8 @@ define void @storef64com(double %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf64 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf64 at hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf64 at hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store double %0, double* @vf64, align 8
ret void
@@ -26,8 +26,8 @@ define void @storef32com(float %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vf32 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vf32 at hi(%s1)
-; CHECK-NEXT: stu %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vf32 at hi(, %s1)
+; CHECK-NEXT: stu %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store float %0, float* @vf32, align 4
ret void
@@ -39,8 +39,8 @@ define void @storei64com(i64 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi64 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi64 at hi(%s1)
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi64 at hi(, %s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i64 %0, i64* @vi64, align 8
ret void
@@ -52,8 +52,8 @@ define void @storei32com(i32 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi32 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi32 at hi(%s1)
-; CHECK-NEXT: stl %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi32 at hi(, %s1)
+; CHECK-NEXT: stl %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i32 %0, i32* @vi32, align 4
ret void
@@ -65,8 +65,8 @@ define void @storei16com(i16 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi16 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi16 at hi(%s1)
-; CHECK-NEXT: st2b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi16 at hi(, %s1)
+; CHECK-NEXT: st2b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i16 %0, i16* @vi16, align 2
ret void
@@ -78,8 +78,8 @@ define void @storei8com(i8 %0) {
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: lea %s1, vi8 at lo
; CHECK-NEXT: and %s1, %s1, (32)0
-; CHECK-NEXT: lea.sl %s1, vi8 at hi(%s1)
-; CHECK-NEXT: st1b %s0, (,%s1)
+; CHECK-NEXT: lea.sl %s1, vi8 at hi(, %s1)
+; CHECK-NEXT: st1b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
store i8 %0, i8* @vi8, align 1
ret void
diff --git a/llvm/test/CodeGen/VE/subtraction.ll b/llvm/test/CodeGen/VE/subtraction.ll
index d5ee586aca57..74af4af86f60 100644
--- a/llvm/test/CodeGen/VE/subtraction.ll
+++ b/llvm/test/CodeGen/VE/subtraction.ll
@@ -81,7 +81,7 @@ define i64 @func9(i64 %0, i64 %1) {
define signext i8 @func13(i8 signext %0, i8 signext %1) {
; CHECK-LABEL: func13:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, -5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, -5, %s0
; CHECK-NEXT: sla.w.sx %s0, %s0, 24
; CHECK-NEXT: sra.w.sx %s0, %s0, 24
; CHECK-NEXT: or %s11, 0, %s9
@@ -92,7 +92,7 @@ define signext i8 @func13(i8 signext %0, i8 signext %1) {
define signext i16 @func14(i16 signext %0, i16 signext %1) {
; CHECK-LABEL: func14:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, -5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, -5, %s0
; CHECK-NEXT: sla.w.sx %s0, %s0, 16
; CHECK-NEXT: sra.w.sx %s0, %s0, 16
; CHECK-NEXT: or %s11, 0, %s9
@@ -103,7 +103,7 @@ define signext i16 @func14(i16 signext %0, i16 signext %1) {
define i32 @func15(i32 %0, i32 %1) {
; CHECK-LABEL: func15:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, -5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, -5, %s0
; CHECK-NEXT: or %s11, 0, %s9
%3 = add nsw i32 %0, -5
ret i32 %3
@@ -112,7 +112,7 @@ define i32 @func15(i32 %0, i32 %1) {
define i64 @func16(i64 %0, i64 %1) {
; CHECK-LABEL: func16:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, -5(%s0)
+; CHECK-NEXT: lea %s0, -5(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%3 = add nsw i64 %0, -5
ret i64 %3
@@ -121,7 +121,7 @@ define i64 @func16(i64 %0, i64 %1) {
define zeroext i8 @func18(i8 zeroext %0, i8 zeroext %1) {
; CHECK-LABEL: func18:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, -5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, -5, %s0
; CHECK-NEXT: and %s0, %s0, (56)0
; CHECK-NEXT: or %s11, 0, %s9
%3 = add i8 %0, -5
@@ -131,7 +131,7 @@ define zeroext i8 @func18(i8 zeroext %0, i8 zeroext %1) {
define zeroext i16 @func19(i16 zeroext %0, i16 zeroext %1) {
; CHECK-LABEL: func19:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, -5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, -5, %s0
; CHECK-NEXT: and %s0, %s0, (48)0
; CHECK-NEXT: or %s11, 0, %s9
%3 = add i16 %0, -5
@@ -141,7 +141,7 @@ define zeroext i16 @func19(i16 zeroext %0, i16 zeroext %1) {
define i32 @func20(i32 %0, i32 %1) {
; CHECK-LABEL: func20:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, -5(%s0)
+; CHECK-NEXT: adds.w.sx %s0, -5, %s0
; CHECK-NEXT: or %s11, 0, %s9
%3 = add i32 %0, -5
ret i32 %3
@@ -150,7 +150,7 @@ define i32 @func20(i32 %0, i32 %1) {
define i64 @func21(i64 %0, i64 %1) {
; CHECK-LABEL: func21:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, -5(%s0)
+; CHECK-NEXT: lea %s0, -5(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%3 = add i64 %0, -5
ret i64 %3
@@ -169,7 +169,7 @@ define i32 @func25(i32 %0, i32 %1) {
define i64 @func26(i64 %0, i64 %1) {
; CHECK-LABEL: func26:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: lea %s0, -2147483648(%s0)
+; CHECK-NEXT: lea %s0, -2147483648(, %s0)
; CHECK-NEXT: or %s11, 0, %s9
%3 = add nsw i64 %0, -2147483648
ret i64 %3
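
A compact way to read the subtraction.ll hunks (this restates the CHECK changes above rather than adding anything new): constant adds on i8, i16, and i32 values now select adds.w.sx with an immediate operand, while i64 adds keep lea and merely gain the explicit empty index slot:

  %3 = add nsw i32 %0, -5   ; func15/func20  ->  adds.w.sx %s0, -5, %s0
  %3 = add nsw i64 %0, -5   ; func16/func21  ->  lea %s0, -5(, %s0)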
diff --git a/llvm/test/CodeGen/VE/tls.ll b/llvm/test/CodeGen/VE/tls.ll
index 9b1672295eef..5807f39566c8 100644
--- a/llvm/test/CodeGen/VE/tls.ll
+++ b/llvm/test/CodeGen/VE/tls.ll
@@ -11,26 +11,7 @@
; Function Attrs: norecurse nounwind readnone
define nonnull i32* @get_global() {
; GENDYN-LABEL: get_global:
-; GENDYN: # %bb.0: # %entry
-; GENDYN-NEXT: st %s9, (,%s11)
-; GENDYN-NEXT: st %s10, 8(,%s11)
-; GENDYN-NEXT: st %s15, 24(,%s11)
-; GENDYN-NEXT: st %s16, 32(,%s11)
-; GENDYN-NEXT: or %s9, 0, %s11
-; GENDYN-NEXT: lea %s13, -240
-; GENDYN-NEXT: and %s13, %s13, (32)0
-; GENDYN-NEXT: lea.sl %s11, -1(%s11, %s13)
-; GENDYN-NEXT: brge.l %s11, %s8, .LBB0_2
-; GENDYN-NEXT: # %bb.1: # %entry
-; GENDYN-NEXT: ld %s61, 24(,%s14)
-; GENDYN-NEXT: or %s62, 0, %s0
-; GENDYN-NEXT: lea %s63, 315
-; GENDYN-NEXT: shm.l %s63, (%s61)
-; GENDYN-NEXT: shm.l %s8, 8(%s61)
-; GENDYN-NEXT: shm.l %s11, 16(%s61)
-; GENDYN-NEXT: monc
-; GENDYN-NEXT: or %s0, 0, %s62
-; GENDYN-NEXT: .LBB0_2: # %entry
+; GENDYN: .LBB{{[0-9]+}}_2:
; GENDYN-NEXT: lea %s0, x at tls_gd_lo(-24)
; GENDYN-NEXT: and %s0, %s0, (32)0
; GENDYN-NEXT: sic %s10
@@ -40,33 +21,9 @@ define nonnull i32* @get_global() {
; GENDYN-NEXT: lea.sl %s12, __tls_get_addr at plt_hi(%s10, %s12)
; GENDYN-NEXT: bsic %s10, (, %s12)
; GENDYN-NEXT: or %s11, 0, %s9
-; GENDYN-NEXT: ld %s16, 32(,%s11)
-; GENDYN-NEXT: ld %s15, 24(,%s11)
-; GENDYN-NEXT: ld %s10, 8(,%s11)
-; GENDYN-NEXT: ld %s9, (,%s11)
-; GENDYN-NEXT: b.l (,%lr)
;
; GENDYNPIC-LABEL: get_global:
-; GENDYNPIC: # %bb.0: # %entry
-; GENDYNPIC-NEXT: st %s9, (,%s11)
-; GENDYNPIC-NEXT: st %s10, 8(,%s11)
-; GENDYNPIC-NEXT: st %s15, 24(,%s11)
-; GENDYNPIC-NEXT: st %s16, 32(,%s11)
-; GENDYNPIC-NEXT: or %s9, 0, %s11
-; GENDYNPIC-NEXT: lea %s13, -240
-; GENDYNPIC-NEXT: and %s13, %s13, (32)0
-; GENDYNPIC-NEXT: lea.sl %s11, -1(%s11, %s13)
-; GENDYNPIC-NEXT: brge.l %s11, %s8, .LBB0_2
-; GENDYNPIC-NEXT: # %bb.1: # %entry
-; GENDYNPIC-NEXT: ld %s61, 24(,%s14)
-; GENDYNPIC-NEXT: or %s62, 0, %s0
-; GENDYNPIC-NEXT: lea %s63, 315
-; GENDYNPIC-NEXT: shm.l %s63, (%s61)
-; GENDYNPIC-NEXT: shm.l %s8, 8(%s61)
-; GENDYNPIC-NEXT: shm.l %s11, 16(%s61)
-; GENDYNPIC-NEXT: monc
-; GENDYNPIC-NEXT: or %s0, 0, %s62
-; GENDYNPIC-NEXT: .LBB0_2: # %entry
+; GENDYNPIC: .LBB{{[0-9]+}}_2:
; GENDYNPIC-NEXT: lea %s15, _GLOBAL_OFFSET_TABLE_ at pc_lo(-24)
; GENDYNPIC-NEXT: and %s15, %s15, (32)0
; GENDYNPIC-NEXT: sic %s16
@@ -80,11 +37,6 @@ define nonnull i32* @get_global() {
; GENDYNPIC-NEXT: lea.sl %s12, __tls_get_addr at plt_hi(%s10, %s12)
; GENDYNPIC-NEXT: bsic %s10, (, %s12)
; GENDYNPIC-NEXT: or %s11, 0, %s9
-; GENDYNPIC-NEXT: ld %s16, 32(,%s11)
-; GENDYNPIC-NEXT: ld %s15, 24(,%s11)
-; GENDYNPIC-NEXT: ld %s10, 8(,%s11)
-; GENDYNPIC-NEXT: ld %s9, (,%s11)
-; GENDYNPIC-NEXT: b.l (,%lr)
; LOCAL-LABEL: get_global:
; LOCAL: .LBB{{[0-9]+}}_2:
; LOCAL-NEXT: lea %s34, x at tpoff_lo
@@ -99,26 +51,7 @@ entry:
; Function Attrs: norecurse nounwind readnone
define nonnull i32* @get_local() {
; GENDYN-LABEL: get_local:
-; GENDYN: # %bb.0: # %entry
-; GENDYN-NEXT: st %s9, (,%s11)
-; GENDYN-NEXT: st %s10, 8(,%s11)
-; GENDYN-NEXT: st %s15, 24(,%s11)
-; GENDYN-NEXT: st %s16, 32(,%s11)
-; GENDYN-NEXT: or %s9, 0, %s11
-; GENDYN-NEXT: lea %s13, -240
-; GENDYN-NEXT: and %s13, %s13, (32)0
-; GENDYN-NEXT: lea.sl %s11, -1(%s11, %s13)
-; GENDYN-NEXT: brge.l %s11, %s8, .LBB1_2
-; GENDYN-NEXT: # %bb.1: # %entry
-; GENDYN-NEXT: ld %s61, 24(,%s14)
-; GENDYN-NEXT: or %s62, 0, %s0
-; GENDYN-NEXT: lea %s63, 315
-; GENDYN-NEXT: shm.l %s63, (%s61)
-; GENDYN-NEXT: shm.l %s8, 8(%s61)
-; GENDYN-NEXT: shm.l %s11, 16(%s61)
-; GENDYN-NEXT: monc
-; GENDYN-NEXT: or %s0, 0, %s62
-; GENDYN-NEXT: .LBB1_2: # %entry
+; GENDYN: .LBB{{[0-9]+}}_2:
; GENDYN-NEXT: lea %s0, y at tls_gd_lo(-24)
; GENDYN-NEXT: and %s0, %s0, (32)0
; GENDYN-NEXT: sic %s10
@@ -128,33 +61,9 @@ define nonnull i32* @get_local() {
; GENDYN-NEXT: lea.sl %s12, __tls_get_addr at plt_hi(%s10, %s12)
; GENDYN-NEXT: bsic %s10, (, %s12)
; GENDYN-NEXT: or %s11, 0, %s9
-; GENDYN-NEXT: ld %s16, 32(,%s11)
-; GENDYN-NEXT: ld %s15, 24(,%s11)
-; GENDYN-NEXT: ld %s10, 8(,%s11)
-; GENDYN-NEXT: ld %s9, (,%s11)
-; GENDYN-NEXT: b.l (,%lr)
;
; GENDYNPIC-LABEL: get_local:
-; GENDYNPIC: # %bb.0: # %entry
-; GENDYNPIC-NEXT: st %s9, (,%s11)
-; GENDYNPIC-NEXT: st %s10, 8(,%s11)
-; GENDYNPIC-NEXT: st %s15, 24(,%s11)
-; GENDYNPIC-NEXT: st %s16, 32(,%s11)
-; GENDYNPIC-NEXT: or %s9, 0, %s11
-; GENDYNPIC-NEXT: lea %s13, -240
-; GENDYNPIC-NEXT: and %s13, %s13, (32)0
-; GENDYNPIC-NEXT: lea.sl %s11, -1(%s11, %s13)
-; GENDYNPIC-NEXT: brge.l %s11, %s8, .LBB1_2
-; GENDYNPIC-NEXT: # %bb.1: # %entry
-; GENDYNPIC-NEXT: ld %s61, 24(,%s14)
-; GENDYNPIC-NEXT: or %s62, 0, %s0
-; GENDYNPIC-NEXT: lea %s63, 315
-; GENDYNPIC-NEXT: shm.l %s63, (%s61)
-; GENDYNPIC-NEXT: shm.l %s8, 8(%s61)
-; GENDYNPIC-NEXT: shm.l %s11, 16(%s61)
-; GENDYNPIC-NEXT: monc
-; GENDYNPIC-NEXT: or %s0, 0, %s62
-; GENDYNPIC-NEXT: .LBB1_2: # %entry
+; GENDYNPIC: .LBB{{[0-9]+}}_2:
; GENDYNPIC-NEXT: lea %s15, _GLOBAL_OFFSET_TABLE_ at pc_lo(-24)
; GENDYNPIC-NEXT: and %s15, %s15, (32)0
; GENDYNPIC-NEXT: sic %s16
@@ -168,11 +77,6 @@ define nonnull i32* @get_local() {
; GENDYNPIC-NEXT: lea.sl %s12, __tls_get_addr at plt_hi(%s10, %s12)
; GENDYNPIC-NEXT: bsic %s10, (, %s12)
; GENDYNPIC-NEXT: or %s11, 0, %s9
-; GENDYNPIC-NEXT: ld %s16, 32(,%s11)
-; GENDYNPIC-NEXT: ld %s15, 24(,%s11)
-; GENDYNPIC-NEXT: ld %s10, 8(,%s11)
-; GENDYNPIC-NEXT: ld %s9, (,%s11)
-; GENDYNPIC-NEXT: b.l (,%lr)
; LOCAL-LABEL: get_local:
; LOCAL: .LBB{{[0-9]+}}_2:
; LOCAL-NEXT: lea %s34, y at tpoff_lo
@@ -187,27 +91,8 @@ entry:
; Function Attrs: norecurse nounwind
define void @set_global(i32 %v) {
; GENDYN-LABEL: set_global:
-; GENDYN: # %bb.0: # %entry
-; GENDYN-NEXT: st %s9, (,%s11)
-; GENDYN-NEXT: st %s10, 8(,%s11)
-; GENDYN-NEXT: st %s15, 24(,%s11)
-; GENDYN-NEXT: st %s16, 32(,%s11)
-; GENDYN-NEXT: or %s9, 0, %s11
-; GENDYN-NEXT: lea %s13, -240
-; GENDYN-NEXT: and %s13, %s13, (32)0
-; GENDYN-NEXT: lea.sl %s11, -1(%s11, %s13)
-; GENDYN-NEXT: brge.l %s11, %s8, .LBB2_2
-; GENDYN-NEXT: # %bb.1: # %entry
-; GENDYN-NEXT: ld %s61, 24(,%s14)
-; GENDYN-NEXT: or %s62, 0, %s0
-; GENDYN-NEXT: lea %s63, 315
-; GENDYN-NEXT: shm.l %s63, (%s61)
-; GENDYN-NEXT: shm.l %s8, 8(%s61)
-; GENDYN-NEXT: shm.l %s11, 16(%s61)
-; GENDYN-NEXT: monc
-; GENDYN-NEXT: or %s0, 0, %s62
-; GENDYN-NEXT: .LBB2_2: # %entry
-; GENDYN-NEXT: st %s18, 48(,%s9) # 8-byte Folded Spill
+; GENDYN: .LBB{{[0-9]+}}_2:
+; GENDYN-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill
; GENDYN-NEXT: or %s18, 0, %s0
; GENDYN-NEXT: lea %s0, x at tls_gd_lo(-24)
; GENDYN-NEXT: and %s0, %s0, (32)0
@@ -217,37 +102,13 @@ define void @set_global(i32 %v) {
; GENDYN-NEXT: and %s12, %s12, (32)0
; GENDYN-NEXT: lea.sl %s12, __tls_get_addr at plt_hi(%s10, %s12)
; GENDYN-NEXT: bsic %s10, (, %s12)
-; GENDYN-NEXT: stl %s18, (,%s0)
-; GENDYN-NEXT: ld %s18, 48(,%s9) # 8-byte Folded Reload
+; GENDYN-NEXT: stl %s18, (, %s0)
+; GENDYN-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload
; GENDYN-NEXT: or %s11, 0, %s9
-; GENDYN-NEXT: ld %s16, 32(,%s11)
-; GENDYN-NEXT: ld %s15, 24(,%s11)
-; GENDYN-NEXT: ld %s10, 8(,%s11)
-; GENDYN-NEXT: ld %s9, (,%s11)
-; GENDYN-NEXT: b.l (,%lr)
;
; GENDYNPIC-LABEL: set_global:
-; GENDYNPIC: # %bb.0: # %entry
-; GENDYNPIC-NEXT: st %s9, (,%s11)
-; GENDYNPIC-NEXT: st %s10, 8(,%s11)
-; GENDYNPIC-NEXT: st %s15, 24(,%s11)
-; GENDYNPIC-NEXT: st %s16, 32(,%s11)
-; GENDYNPIC-NEXT: or %s9, 0, %s11
-; GENDYNPIC-NEXT: lea %s13, -240
-; GENDYNPIC-NEXT: and %s13, %s13, (32)0
-; GENDYNPIC-NEXT: lea.sl %s11, -1(%s11, %s13)
-; GENDYNPIC-NEXT: brge.l %s11, %s8, .LBB2_2
-; GENDYNPIC-NEXT: # %bb.1: # %entry
-; GENDYNPIC-NEXT: ld %s61, 24(,%s14)
-; GENDYNPIC-NEXT: or %s62, 0, %s0
-; GENDYNPIC-NEXT: lea %s63, 315
-; GENDYNPIC-NEXT: shm.l %s63, (%s61)
-; GENDYNPIC-NEXT: shm.l %s8, 8(%s61)
-; GENDYNPIC-NEXT: shm.l %s11, 16(%s61)
-; GENDYNPIC-NEXT: monc
-; GENDYNPIC-NEXT: or %s0, 0, %s62
-; GENDYNPIC-NEXT: .LBB2_2: # %entry
-; GENDYNPIC-NEXT: st %s18, 48(,%s9) # 8-byte Folded Spill
+; GENDYNPIC: .LBB{{[0-9]+}}_2:
+; GENDYNPIC-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill
; GENDYNPIC-NEXT: or %s18, 0, %s0
; GENDYNPIC-NEXT: lea %s15, _GLOBAL_OFFSET_TABLE_ at pc_lo(-24)
; GENDYNPIC-NEXT: and %s15, %s15, (32)0
@@ -261,21 +122,16 @@ define void @set_global(i32 %v) {
; GENDYNPIC-NEXT: and %s12, %s12, (32)0
; GENDYNPIC-NEXT: lea.sl %s12, __tls_get_addr at plt_hi(%s10, %s12)
; GENDYNPIC-NEXT: bsic %s10, (, %s12)
-; GENDYNPIC-NEXT: stl %s18, (,%s0)
-; GENDYNPIC-NEXT: ld %s18, 48(,%s9) # 8-byte Folded Reload
+; GENDYNPIC-NEXT: stl %s18, (, %s0)
+; GENDYNPIC-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload
; GENDYNPIC-NEXT: or %s11, 0, %s9
-; GENDYNPIC-NEXT: ld %s16, 32(,%s11)
-; GENDYNPIC-NEXT: ld %s15, 24(,%s11)
-; GENDYNPIC-NEXT: ld %s10, 8(,%s11)
-; GENDYNPIC-NEXT: ld %s9, (,%s11)
-; GENDYNPIC-NEXT: b.l (,%lr)
; LOCAL-LABEL: set_global:
; LOCAL: .LBB{{[0-9]+}}_2:
; LOCAL-NEXT: lea %s34, x at tpoff_lo
; LOCAL-NEXT: and %s34, %s34, (32)0
; LOCAL-NEXT: lea.sl %s34, x at tpoff_hi(%s34)
; LOCAL-NEXT: adds.l %s34, %s14, %s34
-; LOCAL-NEXT: stl %s0, (,%s34)
+; LOCAL-NEXT: stl %s0, (, %s34)
; LOCAL-NEXT: or %s11, 0, %s9
entry:
store i32 %v, i32* @x, align 4
@@ -285,27 +141,8 @@ entry:
; Function Attrs: norecurse nounwind
define void @set_local(i32 %v) {
; GENDYN-LABEL: set_local:
-; GENDYN: # %bb.0: # %entry
-; GENDYN-NEXT: st %s9, (,%s11)
-; GENDYN-NEXT: st %s10, 8(,%s11)
-; GENDYN-NEXT: st %s15, 24(,%s11)
-; GENDYN-NEXT: st %s16, 32(,%s11)
-; GENDYN-NEXT: or %s9, 0, %s11
-; GENDYN-NEXT: lea %s13, -240
-; GENDYN-NEXT: and %s13, %s13, (32)0
-; GENDYN-NEXT: lea.sl %s11, -1(%s11, %s13)
-; GENDYN-NEXT: brge.l %s11, %s8, .LBB3_2
-; GENDYN-NEXT: # %bb.1: # %entry
-; GENDYN-NEXT: ld %s61, 24(,%s14)
-; GENDYN-NEXT: or %s62, 0, %s0
-; GENDYN-NEXT: lea %s63, 315
-; GENDYN-NEXT: shm.l %s63, (%s61)
-; GENDYN-NEXT: shm.l %s8, 8(%s61)
-; GENDYN-NEXT: shm.l %s11, 16(%s61)
-; GENDYN-NEXT: monc
-; GENDYN-NEXT: or %s0, 0, %s62
-; GENDYN-NEXT: .LBB3_2: # %entry
-; GENDYN-NEXT: st %s18, 48(,%s9) # 8-byte Folded Spill
+; GENDYN: .LBB{{[0-9]+}}_2:
+; GENDYN-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill
; GENDYN-NEXT: or %s18, 0, %s0
; GENDYN-NEXT: lea %s0, y at tls_gd_lo(-24)
; GENDYN-NEXT: and %s0, %s0, (32)0
@@ -315,37 +152,13 @@ define void @set_local(i32 %v) {
; GENDYN-NEXT: and %s12, %s12, (32)0
; GENDYN-NEXT: lea.sl %s12, __tls_get_addr at plt_hi(%s10, %s12)
; GENDYN-NEXT: bsic %s10, (, %s12)
-; GENDYN-NEXT: stl %s18, (,%s0)
-; GENDYN-NEXT: ld %s18, 48(,%s9) # 8-byte Folded Reload
+; GENDYN-NEXT: stl %s18, (, %s0)
+; GENDYN-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload
; GENDYN-NEXT: or %s11, 0, %s9
-; GENDYN-NEXT: ld %s16, 32(,%s11)
-; GENDYN-NEXT: ld %s15, 24(,%s11)
-; GENDYN-NEXT: ld %s10, 8(,%s11)
-; GENDYN-NEXT: ld %s9, (,%s11)
-; GENDYN-NEXT: b.l (,%lr)
;
; GENDYNPIC-LABEL: set_local:
-; GENDYNPIC: # %bb.0: # %entry
-; GENDYNPIC-NEXT: st %s9, (,%s11)
-; GENDYNPIC-NEXT: st %s10, 8(,%s11)
-; GENDYNPIC-NEXT: st %s15, 24(,%s11)
-; GENDYNPIC-NEXT: st %s16, 32(,%s11)
-; GENDYNPIC-NEXT: or %s9, 0, %s11
-; GENDYNPIC-NEXT: lea %s13, -240
-; GENDYNPIC-NEXT: and %s13, %s13, (32)0
-; GENDYNPIC-NEXT: lea.sl %s11, -1(%s11, %s13)
-; GENDYNPIC-NEXT: brge.l %s11, %s8, .LBB3_2
-; GENDYNPIC-NEXT: # %bb.1: # %entry
-; GENDYNPIC-NEXT: ld %s61, 24(,%s14)
-; GENDYNPIC-NEXT: or %s62, 0, %s0
-; GENDYNPIC-NEXT: lea %s63, 315
-; GENDYNPIC-NEXT: shm.l %s63, (%s61)
-; GENDYNPIC-NEXT: shm.l %s8, 8(%s61)
-; GENDYNPIC-NEXT: shm.l %s11, 16(%s61)
-; GENDYNPIC-NEXT: monc
-; GENDYNPIC-NEXT: or %s0, 0, %s62
-; GENDYNPIC-NEXT: .LBB3_2: # %entry
-; GENDYNPIC-NEXT: st %s18, 48(,%s9) # 8-byte Folded Spill
+; GENDYNPIC: .LBB{{[0-9]+}}_2:
+; GENDYNPIC-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill
; GENDYNPIC-NEXT: or %s18, 0, %s0
; GENDYNPIC-NEXT: lea %s15, _GLOBAL_OFFSET_TABLE_ at pc_lo(-24)
; GENDYNPIC-NEXT: and %s15, %s15, (32)0
@@ -359,21 +172,16 @@ define void @set_local(i32 %v) {
; GENDYNPIC-NEXT: and %s12, %s12, (32)0
; GENDYNPIC-NEXT: lea.sl %s12, __tls_get_addr at plt_hi(%s10, %s12)
; GENDYNPIC-NEXT: bsic %s10, (, %s12)
-; GENDYNPIC-NEXT: stl %s18, (,%s0)
-; GENDYNPIC-NEXT: ld %s18, 48(,%s9) # 8-byte Folded Reload
+; GENDYNPIC-NEXT: stl %s18, (, %s0)
+; GENDYNPIC-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload
; GENDYNPIC-NEXT: or %s11, 0, %s9
-; GENDYNPIC-NEXT: ld %s16, 32(,%s11)
-; GENDYNPIC-NEXT: ld %s15, 24(,%s11)
-; GENDYNPIC-NEXT: ld %s10, 8(,%s11)
-; GENDYNPIC-NEXT: ld %s9, (,%s11)
-; GENDYNPIC-NEXT: b.l (,%lr)
; LOCAL-LABEL: set_local:
; LOCAL: .LBB{{[0-9]+}}_2:
; LOCAL-NEXT: lea %s34, y at tpoff_lo
; LOCAL-NEXT: and %s34, %s34, (32)0
; LOCAL-NEXT: lea.sl %s34, y at tpoff_hi(%s34)
; LOCAL-NEXT: adds.l %s34, %s14, %s34
-; LOCAL-NEXT: stl %s0, (,%s34)
+; LOCAL-NEXT: stl %s0, (, %s34)
; LOCAL-NEXT: or %s11, 0, %s9
entry:
store i32 %v, i32* @y, align 4
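
The tls.ll hunks drop the long spelled-out prologue and epilogue sequences and switch to the same FileCheck idiom the other tests in this patch use: a plain prefix directive (without -NEXT) may match anywhere after the previous match, so anchoring on the block label skips over whatever the prologue emits and the remaining -NEXT lines only pin the TLS-specific code. The shape is:

  ; GENDYN-LABEL: get_global:
  ; GENDYN:       .LBB{{[0-9]+}}_2:

followed by -NEXT checks for the TLS address materialization alone.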
diff --git a/llvm/test/CodeGen/VE/truncstore.ll b/llvm/test/CodeGen/VE/truncstore.ll
index 573d3df90d58..357cc6b11791 100644
--- a/llvm/test/CodeGen/VE/truncstore.ll
+++ b/llvm/test/CodeGen/VE/truncstore.ll
@@ -3,7 +3,7 @@
define void @func0(i1 signext %p, i8* %a) {
; CHECK-LABEL: func0:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st1b %s0, (,%s1)
+; CHECK-NEXT: st1b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
%p.conv = sext i1 %p to i8
store i8 %p.conv, i8* %a, align 2
@@ -13,7 +13,7 @@ define void @func0(i1 signext %p, i8* %a) {
define void @func1(i8 signext %p, i16* %a) {
; CHECK-LABEL: func1:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st2b %s0, (,%s1)
+; CHECK-NEXT: st2b %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
%p.conv = sext i8 %p to i16
store i16 %p.conv, i16* %a, align 2
@@ -23,7 +23,7 @@ define void @func1(i8 signext %p, i16* %a) {
define void @func2(i8 signext %p, i32* %a) {
; CHECK-LABEL: func2:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, (,%s1)
+; CHECK-NEXT: stl %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
%p.conv = sext i8 %p to i32
store i32 %p.conv, i32* %a, align 4
@@ -34,7 +34,7 @@ define void @func3(i8 signext %p, i64* %a) {
; CHECK-LABEL: func3:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: adds.w.sx %s0, %s0, (0)1
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
%p.conv = sext i8 %p to i64
store i64 %p.conv, i64* %a, align 8
@@ -44,7 +44,7 @@ define void @func3(i8 signext %p, i64* %a) {
define void @func5(i16 signext %p, i32* %a) {
; CHECK-LABEL: func5:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: stl %s0, (,%s1)
+; CHECK-NEXT: stl %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
%p.conv = sext i16 %p to i32
store i32 %p.conv, i32* %a, align 4
@@ -55,7 +55,7 @@ define void @func6(i16 signext %p, i64* %a) {
; CHECK-LABEL: func6:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: adds.w.sx %s0, %s0, (0)1
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
%p.conv = sext i16 %p to i64
store i64 %p.conv, i64* %a, align 8
@@ -66,7 +66,7 @@ define void @func8(i32 %p, i64* %a) {
; CHECK-LABEL: func8:
; CHECK: .LBB{{[0-9]+}}_2:
; CHECK-NEXT: adds.w.sx %s0, %s0, (0)1
-; CHECK-NEXT: st %s0, (,%s1)
+; CHECK-NEXT: st %s0, (, %s1)
; CHECK-NEXT: or %s11, 0, %s9
%p.conv = sext i32 %p to i64
store i64 %p.conv, i64* %a, align 8
diff --git a/llvm/test/CodeGen/VE/va_arg.ll b/llvm/test/CodeGen/VE/va_arg.ll
index 00387aaeece9..1eaa8d3f5053 100644
--- a/llvm/test/CodeGen/VE/va_arg.ll
+++ b/llvm/test/CodeGen/VE/va_arg.ll
@@ -13,16 +13,16 @@
define i32 @func_vainout(i32, ...) {
; CHECK-LABEL: func_vainout:
-; CHECK: ldl.sx %s1, 184(,%s9)
-; CHECK: ld2b.sx %s18, 192(,%s9)
-; CHECK: ld1b.sx %s19, 200(,%s9)
-; CHECK: ldl.sx %s20, 208(,%s9)
-; CHECK: ld2b.zx %s21, 216(,%s9)
-; CHECK: ld1b.zx %s22, 224(,%s9)
-; CHECK: ldu %s23, 236(,%s9)
-; CHECK: ld %s24, 240(,%s9)
-; CHECK: ld %s25, 248(,%s9)
-; CHECK: ld %s26, 256(,%s9)
+; CHECK: ldl.sx %s1, 184(, %s9)
+; CHECK: ld2b.sx %s18, 192(, %s9)
+; CHECK: ld1b.sx %s19, 200(, %s9)
+; CHECK: ldl.sx %s20, 208(, %s9)
+; CHECK: ld2b.zx %s21, 216(, %s9)
+; CHECK: ld1b.zx %s22, 224(, %s9)
+; CHECK: ldu %s23, 236(, %s9)
+; CHECK: ld %s24, 240(, %s9)
+; CHECK: ld %s25, 248(, %s9)
+; CHECK: ld %s26, 256(, %s9)
%a = alloca i8*, align 8
%a8 = bitcast i8** %a to i8*
diff --git a/llvm/test/CodeGen/VE/va_callee.ll b/llvm/test/CodeGen/VE/va_callee.ll
index 6a705243ba09..ecdf05dc0f3a 100644
--- a/llvm/test/CodeGen/VE/va_callee.ll
+++ b/llvm/test/CodeGen/VE/va_callee.ll
@@ -2,15 +2,15 @@
define i32 @va_func(i32, ...) {
; CHECK-LABEL: va_func:
-; CHECK: ldl.sx %s0, 184(,%s9)
-; CHECK: ld2b.sx %s18, 192(,%s9)
-; CHECK: ld1b.sx %s19, 200(,%s9)
-; CHECK: ldl.sx %s20, 208(,%s9)
-; CHECK: ld2b.zx %s21, 216(,%s9)
-; CHECK: ld1b.zx %s22, 224(,%s9)
-; CHECK: ldu %s23, 236(,%s9)
-; CHECK: ld %s24, 240(,%s9)
-; CHECK: ld %s25, 248(,%s9)
+; CHECK: ldl.sx %s0, 184(, %s9)
+; CHECK: ld2b.sx %s18, 192(, %s9)
+; CHECK: ld1b.sx %s19, 200(, %s9)
+; CHECK: ldl.sx %s20, 208(, %s9)
+; CHECK: ld2b.zx %s21, 216(, %s9)
+; CHECK: ld1b.zx %s22, 224(, %s9)
+; CHECK: ldu %s23, 236(, %s9)
+; CHECK: ld %s24, 240(, %s9)
+; CHECK: ld %s25, 248(, %s9)
%va = alloca i8*, align 8
%va.i8 = bitcast i8** %va to i8*
diff --git a/llvm/test/CodeGen/VE/va_caller.ll b/llvm/test/CodeGen/VE/va_caller.ll
index a8b5fe81d732..414fce57dee4 100644
--- a/llvm/test/CodeGen/VE/va_caller.ll
+++ b/llvm/test/CodeGen/VE/va_caller.ll
@@ -5,42 +5,42 @@ declare i32 @func(i32, ...)
define i32 @caller() {
; CHECK-LABEL: caller:
; CHECK: .LBB{{[0-9]+}}_2:
-; CHECK-NEXT: st %s18, 48(,%s9) # 8-byte Folded Spill
+; CHECK-NEXT: st %s18, 48(, %s9) # 8-byte Folded Spill
; CHECK-NEXT: or %s7, 0, (0)1
-; CHECK-NEXT: st %s7, 280(,%s11)
+; CHECK-NEXT: st %s7, 280(, %s11)
; CHECK-NEXT: or %s0, 11, (0)1
-; CHECK-NEXT: st %s0, 272(,%s11)
-; CHECK-NEXT: st %s7, 264(,%s11)
+; CHECK-NEXT: st %s0, 272(, %s11)
+; CHECK-NEXT: st %s7, 264(, %s11)
; CHECK-NEXT: or %s0, 10, (0)1
-; CHECK-NEXT: st %s0, 256(,%s11)
+; CHECK-NEXT: st %s0, 256(, %s11)
; CHECK-NEXT: lea.sl %s0, 1075970048
-; CHECK-NEXT: st %s0, 248(,%s11)
+; CHECK-NEXT: st %s0, 248(, %s11)
; CHECK-NEXT: or %s0, 8, (0)1
-; CHECK-NEXT: st %s0, 240(,%s11)
-; CHECK-NEXT: st %s7, 232(,%s11)
+; CHECK-NEXT: st %s0, 240(, %s11)
+; CHECK-NEXT: st %s7, 232(, %s11)
; CHECK-NEXT: lea %s0, 1086324736
-; CHECK-NEXT: stl %s0, 228(,%s11)
+; CHECK-NEXT: stl %s0, 228(, %s11)
; CHECK-NEXT: or %s5, 5, (0)1
-; CHECK-NEXT: stl %s5, 216(,%s11)
+; CHECK-NEXT: stl %s5, 216(, %s11)
; CHECK-NEXT: or %s4, 4, (0)1
-; CHECK-NEXT: stl %s4, 208(,%s11)
+; CHECK-NEXT: stl %s4, 208(, %s11)
; CHECK-NEXT: or %s3, 3, (0)1
-; CHECK-NEXT: stl %s3, 200(,%s11)
+; CHECK-NEXT: stl %s3, 200(, %s11)
; CHECK-NEXT: or %s2, 2, (0)1
-; CHECK-NEXT: stl %s2, 192(,%s11)
+; CHECK-NEXT: stl %s2, 192(, %s11)
; CHECK-NEXT: or %s1, 1, (0)1
-; CHECK-NEXT: stl %s1, 184(,%s11)
+; CHECK-NEXT: stl %s1, 184(, %s11)
; CHECK-NEXT: or %s18, 0, (0)1
; CHECK-NEXT: lea %s0, func at lo
; CHECK-NEXT: and %s0, %s0, (32)0
-; CHECK-NEXT: lea.sl %s12, func at hi(%s0)
+; CHECK-NEXT: lea.sl %s12, func at hi(, %s0)
; CHECK-NEXT: lea.sl %s0, 1086324736
-; CHECK-NEXT: stl %s18, 176(,%s11)
+; CHECK-NEXT: stl %s18, 176(, %s11)
; CHECK-NEXT: or %s6, 0, %s0
; CHECK-NEXT: or %s0, 0, %s18
; CHECK-NEXT: bsic %lr, (,%s12)
; CHECK-NEXT: or %s0, 0, %s18
-; CHECK-NEXT: ld %s18, 48(,%s9) # 8-byte Folded Reload
+; CHECK-NEXT: ld %s18, 48(, %s9) # 8-byte Folded Reload
; CHECK-NEXT: or %s11, 0, %s9
call i32 (i32, ...) @func(i32 0, i16 1, i8 2, i32 3, i16 4, i8 5, float 6.0, i8* null, i64 8, double 9.0, i128 10, i128 11)
ret i32 0
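
To re-verify any of the updated files locally, the usual llc-plus-FileCheck pipeline is sufficient. The RUN lines themselves fall outside the context shown in this diff, so the triple below is an assumption about how these VE tests are normally driven; running the directory through llvm-lit avoids guessing:

  llc < llvm/test/CodeGen/VE/store.ll -mtriple=ve-unknown-unknown \
      | FileCheck llvm/test/CodeGen/VE/store.ll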