[llvm] 3a906a9 - [VE] i<N> and fp32/64 arguments, return values and constants

Simon Moll via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 22 00:18:32 PST 2020


Author: Kazushi (Jam) Marukawa
Date: 2020-01-22T09:17:44+01:00
New Revision: 3a906a9f4e6b51130d9a1bdd5f9a7ec3705d3faa

URL: https://github.com/llvm/llvm-project/commit/3a906a9f4e6b51130d9a1bdd5f9a7ec3705d3faa
DIFF: https://github.com/llvm/llvm-project/commit/3a906a9f4e6b51130d9a1bdd5f9a7ec3705d3faa.diff

LOG: [VE] i<N> and fp32/64 arguments, return values and constants

Summary:
Support for i<N> and fp32/64 arguments (in register), return values
and constants along with tests.

Reviewed By: arsenm

Differential Revision: https://reviews.llvm.org/D73092

Added: 
    llvm/test/CodeGen/VE/cast.ll
    llvm/test/CodeGen/VE/constants.ll

Modified: 
    llvm/lib/Target/VE/VECallingConv.td
    llvm/lib/Target/VE/VEISelLowering.cpp
    llvm/lib/Target/VE/VEISelLowering.h
    llvm/lib/Target/VE/VEInstrFormats.td
    llvm/lib/Target/VE/VEInstrInfo.cpp
    llvm/lib/Target/VE/VEInstrInfo.td
    llvm/lib/Target/VE/VERegisterInfo.cpp
    llvm/lib/Target/VE/VERegisterInfo.td

Removed: 
    llvm/test/CodeGen/VE/constants_i64.ll


################################################################################
diff --git a/llvm/lib/Target/VE/VECallingConv.td b/llvm/lib/Target/VE/VECallingConv.td
index 8bb50ba4a3fa..110505674312 100644
--- a/llvm/lib/Target/VE/VECallingConv.td
+++ b/llvm/lib/Target/VE/VECallingConv.td
@@ -17,14 +17,40 @@
 def CC_VE : CallingConv<[
   // All arguments get passed in generic registers if there is space.
 
+  // Promote i1/i8/i16 arguments to i32.
+  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
+
+  // bool, char, int, enum, long --> generic integer 32 bit registers
+  CCIfType<[i32], CCAssignToRegWithShadow<
+    [SW0, SW1, SW2, SW3, SW4, SW5, SW6, SW7],
+    [SX0, SX1, SX2, SX3, SX4, SX5, SX6, SX7]>>,
+
+  // float --> generic floating point 32 bit registers
+  CCIfType<[f32], CCAssignToRegWithShadow<
+    [SF0, SF1, SF2, SF3, SF4, SF5, SF6, SF7],
+    [SX0, SX1, SX2, SX3, SX4, SX5, SX6, SX7]>>,
+
   // long long/double --> generic 64 bit registers
-  CCIfType<[i64],
+  CCIfType<[i64, f64],
            CCAssignToReg<[SX0, SX1, SX2, SX3, SX4, SX5, SX6, SX7]>>,
 ]>;
 
 def RetCC_VE : CallingConv<[
+  // Promote i1/i8/i16 arguments to i32.
+  CCIfType<[i1, i8, i16], CCPromoteToType<i32>>,
+
+  // bool, char, int, enum, long --> generic integer 32 bit registers
+  CCIfType<[i32], CCAssignToRegWithShadow<
+    [SW0, SW1, SW2, SW3, SW4, SW5, SW6, SW7],
+    [SX0, SX1, SX2, SX3, SX4, SX5, SX6, SX7]>>,
+
+  // float --> generic floating point 32 bit registers
+  CCIfType<[f32], CCAssignToRegWithShadow<
+    [SF0, SF1, SF2, SF3, SF4, SF5, SF6, SF7],
+    [SX0, SX1, SX2, SX3, SX4, SX5, SX6, SX7]>>,
+
   // long long/double --> generic 64 bit registers
-  CCIfType<[i64],
+  CCIfType<[i64, f64],
            CCAssignToReg<[SX0, SX1, SX2, SX3, SX4, SX5, SX6, SX7]>>,
 ]>;
 

diff --git a/llvm/lib/Target/VE/VEISelLowering.cpp b/llvm/lib/Target/VE/VEISelLowering.cpp
index 636e8d61e955..471d4c2c9ef8 100644
--- a/llvm/lib/Target/VE/VEISelLowering.cpp
+++ b/llvm/lib/Target/VE/VEISelLowering.cpp
@@ -89,6 +89,8 @@ VETargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
       llvm_unreachable("Unknown loc info!");
     }
 
+    assert(!VA.needsCustom() && "Unexpected custom lowering");
+
     Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);
 
     // Guarantee that all emitted copies are stuck together with flags.
@@ -136,8 +138,10 @@ SDValue VETargetLowering::LowerFormalArguments(
           MF.addLiveIn(VA.getLocReg(), getRegClassFor(VA.getLocVT()));
       SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
 
-      assert((VA.getValVT() == MVT::i64) &&
-             "TODO implement other argument types than i64");
+      // Get the high bits for i32 struct elements.
+      if (VA.getValVT() == MVT::i32 && VA.needsCustom())
+        Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
+                          DAG.getConstant(32, DL, MVT::i32));
 
       // The caller promoted the argument, so insert an Assert?ext SDNode so we
       // won't promote the value again in this function.
@@ -193,6 +197,14 @@ Register VETargetLowering::getRegisterByName(const char *RegName, LLT VT,
 // TargetLowering Implementation
 //===----------------------------------------------------------------------===//
 
+/// isFPImmLegal - Returns true if the target can instruction select the
+/// specified FP immediate natively. If false, the legalizer will
+/// materialize the FP immediate as a load from a constant pool.
+bool VETargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
+                                    bool ForCodeSize) const {
+  return VT == MVT::f32 || VT == MVT::f64;
+}
+
 VETargetLowering::VETargetLowering(const TargetMachine &TM,
                                    const VESubtarget &STI)
     : TargetLowering(TM), Subtarget(&STI) {
@@ -205,7 +217,10 @@ VETargetLowering::VETargetLowering(const TargetMachine &TM,
   setBooleanVectorContents(ZeroOrOneBooleanContent);
 
   // Set up the register classes.
+  addRegisterClass(MVT::i32, &VE::I32RegClass);
   addRegisterClass(MVT::i64, &VE::I64RegClass);
+  addRegisterClass(MVT::f32, &VE::F32RegClass);
+  addRegisterClass(MVT::f64, &VE::I64RegClass);
 
   setStackPointerRegisterToSaveRestore(VE::SX11);
 

diff --git a/llvm/lib/Target/VE/VEISelLowering.h b/llvm/lib/Target/VE/VEISelLowering.h
index 39b3610a0c3a..1aec71d379a3 100644
--- a/llvm/lib/Target/VE/VEISelLowering.h
+++ b/llvm/lib/Target/VE/VEISelLowering.h
@@ -34,6 +34,9 @@ class VETargetLowering : public TargetLowering {
   VETargetLowering(const TargetMachine &TM, const VESubtarget &STI);
 
   const char *getTargetNodeName(unsigned Opcode) const override;
+  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
+    return MVT::i32;
+  }
 
   Register getRegisterByName(const char *RegName, LLT VT,
                              const MachineFunction &MF) const override;
@@ -56,6 +59,9 @@ class VETargetLowering : public TargetLowering {
                       const SmallVectorImpl<ISD::OutputArg> &Outs,
                       const SmallVectorImpl<SDValue> &OutVals, const SDLoc &dl,
                       SelectionDAG &DAG) const override;
+
+  bool isFPImmLegal(const APFloat &Imm, EVT VT,
+                    bool ForCodeSize) const override;
 };
 } // namespace llvm
 

diff --git a/llvm/lib/Target/VE/VEInstrFormats.td b/llvm/lib/Target/VE/VEInstrFormats.td
index a8d3e786ba89..f2f281206c57 100644
--- a/llvm/lib/Target/VE/VEInstrFormats.td
+++ b/llvm/lib/Target/VE/VEInstrFormats.td
@@ -44,8 +44,8 @@ class RM<bits<8>opVal, dag outs, dag ins, string asmstr, list<dag> pattern=[]>
   let Inst{63-32}  = imm32;
 }
 
-class RR<bits<8>opVal, dag outs, dag ins, string asmstr>
-   : RM<opVal, outs, ins, asmstr> {
+class RR<bits<8>opVal, dag outs, dag ins, string asmstr, list<dag> pattern=[]>
+   : RM<opVal, outs, ins, asmstr, pattern> {
   bits<1> cw = 0;
   bits<1> cw2 = 0;
   bits<4> cfw = 0;

diff --git a/llvm/lib/Target/VE/VEInstrInfo.cpp b/llvm/lib/Target/VE/VEInstrInfo.cpp
index 37ad69291e74..bbd68196ada2 100644
--- a/llvm/lib/Target/VE/VEInstrInfo.cpp
+++ b/llvm/lib/Target/VE/VEInstrInfo.cpp
@@ -38,12 +38,18 @@ VEInstrInfo::VEInstrInfo(VESubtarget &ST)
     : VEGenInstrInfo(VE::ADJCALLSTACKDOWN, VE::ADJCALLSTACKUP), RI(),
       Subtarget(ST) {}
 
+static bool IsAliasOfSX(Register Reg) {
+  return VE::I8RegClass.contains(Reg) || VE::I16RegClass.contains(Reg) ||
+         VE::I32RegClass.contains(Reg) || VE::I64RegClass.contains(Reg) ||
+         VE::F32RegClass.contains(Reg);
+}
+
 void VEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator I, const DebugLoc &DL,
                               MCRegister DestReg, MCRegister SrcReg,
                               bool KillSrc) const {
 
-  if (VE::I64RegClass.contains(SrcReg) && VE::I64RegClass.contains(DestReg)) {
+  if (IsAliasOfSX(SrcReg) && IsAliasOfSX(DestReg)) {
     BuildMI(MBB, I, DL, get(VE::ORri), DestReg)
         .addReg(SrcReg, getKillRegState(KillSrc))
         .addImm(0);

diff --git a/llvm/lib/Target/VE/VEInstrInfo.td b/llvm/lib/Target/VE/VEInstrInfo.td
index e18af8090607..82b6b0dd8bad 100644
--- a/llvm/lib/Target/VE/VEInstrInfo.td
+++ b/llvm/lib/Target/VE/VEInstrInfo.td
@@ -32,6 +32,24 @@ def lomsbzero   : PatLeaf<(imm), [{ return (N->getZExtValue() & 0x80000000)
                                       == 0; }]>;
 def lozero      : PatLeaf<(imm), [{ return (N->getZExtValue() & 0xffffffff)
                                       == 0; }]>;
+def fplomsbzero : PatLeaf<(fpimm), [{ return (N->getValueAPF().bitcastToAPInt()
+                                      .getZExtValue() & 0x80000000) == 0; }]>;
+def fplozero    : PatLeaf<(fpimm), [{ return (N->getValueAPF().bitcastToAPInt()
+                                      .getZExtValue() & 0xffffffff) == 0; }]>;
+
+def LOFP32 : SDNodeXForm<fpimm, [{
+  // Get a integer immediate from fpimm
+  const APInt& imm = N->getValueAPF().bitcastToAPInt();
+  return CurDAG->getTargetConstant(Lo_32(imm.getZExtValue() & 0xffffffff),
+                                   SDLoc(N), MVT::i64);
+}]>;
+
+def HIFP32 : SDNodeXForm<fpimm, [{
+  // Get a integer immediate from fpimm
+  const APInt& imm = N->getValueAPF().bitcastToAPInt();
+  return CurDAG->getTargetConstant(Hi_32(imm.getZExtValue()),
+                                   SDLoc(N), MVT::i64);
+}]>;
 
 def LO32 : SDNodeXForm<imm, [{
   return CurDAG->getTargetConstant(Lo_32(N->getZExtValue()),
@@ -61,14 +79,26 @@ def brtarget32 : Operand<OtherVT> {
   let EncoderMethod = "getBranchTarget32OpValue";
 }
 
+def simm7Op32 : Operand<i32> {
+  let DecoderMethod = "DecodeSIMM7";
+}
+
 def simm7Op64 : Operand<i64> {
   let DecoderMethod = "DecodeSIMM7";
 }
 
+def simm32Op32 : Operand<i32> {
+  let DecoderMethod = "DecodeSIMM32";
+}
+
 def simm32Op64 : Operand<i64> {
   let DecoderMethod = "DecodeSIMM32";
 }
 
+def uimm6Op32 : Operand<i32> {
+  let DecoderMethod = "DecodeUIMM6";
+}
+
 def uimm6Op64 : Operand<i64> {
   let DecoderMethod = "DecodeUIMM6";
 }
@@ -156,29 +186,32 @@ multiclass RMm<string opcStr, bits<8>opc,
 
 // Multiclass for RR type instructions
 
-multiclass RRmrr<string opcStr, bits<8>opc,
+multiclass RRmrr<string opcStr, bits<8>opc, SDNode OpNode,
                  RegisterClass RCo, ValueType Tyo,
                  RegisterClass RCi, ValueType Tyi> {
   def rr : RR<opc, (outs RCo:$sx), (ins RCi:$sy, RCi:$sz),
-              !strconcat(opcStr, " $sx, $sy, $sz")>
+              !strconcat(opcStr, " $sx, $sy, $sz"),
+              [(set Tyo:$sx, (OpNode Tyi:$sy, Tyi:$sz))]>
            { let cy = 1; let cz = 1; let hasSideEffects = 0; }
 }
 
-multiclass RRmri<string opcStr, bits<8>opc,
+multiclass RRmri<string opcStr, bits<8>opc, SDNode OpNode,
                  RegisterClass RCo, ValueType Tyo,
                  RegisterClass RCi, ValueType Tyi, Operand immOp> {
   // VE calculates (OpNode $sy, $sz), but llvm requires to have immediate
   // in RHS, so we use following definition.
   def ri : RR<opc, (outs RCo:$sx), (ins RCi:$sz, immOp:$sy),
-              !strconcat(opcStr, " $sx, $sy, $sz")>
+              !strconcat(opcStr, " $sx, $sy, $sz"),
+              [(set Tyo:$sx, (OpNode Tyi:$sz, (Tyi simm7:$sy)))]>
            { let cy = 0; let cz = 1; let hasSideEffects = 0; }
 }
 
-multiclass RRmiz<string opcStr, bits<8>opc,
+multiclass RRmiz<string opcStr, bits<8>opc, SDNode OpNode,
                  RegisterClass RCo, ValueType Tyo,
                  RegisterClass RCi, ValueType Tyi, Operand immOp> {
   def zi : RR<opc, (outs RCo:$sx), (ins immOp:$sy),
-              !strconcat(opcStr, " $sx, $sy")>
+              !strconcat(opcStr, " $sx, $sy"),
+              [(set Tyo:$sx, (OpNode (Tyi simm7:$sy), 0))]>
            { let cy = 0; let cz = 0; let sz = 0; let hasSideEffects = 0; }
 }
 
@@ -194,6 +227,12 @@ multiclass RRNDmrm<string opcStr, bits<8>opc,
               // it fails to infer from a pattern.
               let hasSideEffects = 0;
             }
+  def rm1 : RR<opc, (outs RCo:$sx), (ins RCi:$sy, immOp2:$sz),
+               !strconcat(opcStr, " $sx, $sy, (${sz})1")> {
+              let cy = 1;
+              let cz = 0;
+              let hasSideEffects = 0;
+            }
 }
 
 multiclass RRNDmim<string opcStr, bits<8>opc,
@@ -211,14 +250,30 @@ multiclass RRNDmim<string opcStr, bits<8>opc,
 // Used by add, mul, div, and similar commutative instructions
 //   The order of operands are "$sx, $sy, $sz"
 
-multiclass RRm<string opcStr, bits<8>opc,
+multiclass RRm<string opcStr, bits<8>opc, SDNode OpNode,
                RegisterClass RC, ValueType Ty, Operand immOp, Operand immOp2> :
-  RRmrr<opcStr, opc, RC, Ty, RC, Ty>,
-  RRmri<opcStr, opc, RC, Ty, RC, Ty, immOp>,
-  RRmiz<opcStr, opc, RC, Ty, RC, Ty, immOp>,
+  RRmrr<opcStr, opc, OpNode, RC, Ty, RC, Ty>,
+  RRmri<opcStr, opc, OpNode, RC, Ty, RC, Ty, immOp>,
+  RRmiz<opcStr, opc, OpNode, RC, Ty, RC, Ty, immOp>,
   RRNDmrm<opcStr, opc, RC, Ty, RC, Ty, immOp2>,
   RRNDmim<opcStr, opc, RC, Ty, RC, Ty, immOp, immOp2>;
 
+// Multiclass for RR type instructions
+//   Used by sra, sla, sll, and similar instructions
+//   The order of operands are "$sx, $sz, $sy"
+
+multiclass RRIm<string opcStr, bits<8>opc, SDNode OpNode,
+                RegisterClass RC, ValueType Ty, Operand immOp, Operand immOp2> {
+  def ri : RR<
+    opc, (outs RC:$sx), (ins RC:$sz, immOp:$sy),
+    !strconcat(opcStr, " $sx, $sz, $sy"),
+    [(set Ty:$sx, (OpNode Ty:$sz, (i32 simm7:$sy)))]> {
+    let cy = 0;
+    let cz = 1;
+    let hasSideEffects = 0;
+  }
+}
+
 // Branch multiclass
 let isBranch = 1, isTerminator = 1, hasDelaySlot = 1 in
 multiclass BCRm<string opcStr, string opcStrAt, bits<8> opc,
@@ -233,7 +288,6 @@ multiclass BCRm<string opcStr, string opcStrAt, bits<8> opc,
   }
 }
 
-
 //===----------------------------------------------------------------------===//
 // Instructions
 //===----------------------------------------------------------------------===//
@@ -243,20 +297,50 @@ let cx = 0 in
 defm LEA : RMm<"lea", 0x06, I64, i64, simm7Op64, simm32Op64>;
 let cx = 1 in
 defm LEASL : RMm<"lea.sl", 0x06, I64, i64, simm7Op64, simm32Op64>;
+let isCodeGenOnly = 1 in {
+let cx = 0 in
+defm LEA32 : RMm<"lea", 0x06, I32, i32, simm7Op32, simm32Op32>;
+}
+
 
 // 5.3.2.2. Fixed-Point Arithmetic Operation Instructions
 
+// ADS instruction
+let cx = 0 in
+defm ADS : RRm<"adds.w.sx", 0x4A, add, I32, i32, simm7Op32, uimm6Op32>;
+let cx = 1 in
+defm ADSU : RRm<"adds.w.zx", 0x4A, add, I32, i32, simm7Op32, uimm6Op32>;
+
+
 // ADX instruction
 let cx = 0 in
-defm ADX : RRm<"adds.l", 0x59, I64, i64, simm7Op64, uimm6Op64>;
+defm ADX : RRm<"adds.l", 0x59, add, I64, i64, simm7Op64, uimm6Op64>;
 
 // 5.3.2.3. Logical Arithmetic Operation Instructions
 
 let cx = 0 in {
-  defm AND : RRm<"and", 0x44, I64, i64, simm7Op64, uimm6Op64>;
-  defm OR : RRm<"or", 0x45, I64, i64, simm7Op64, uimm6Op64>;
+  defm AND : RRm<"and", 0x44, and, I64, i64, simm7Op64, uimm6Op64>;
+  defm OR : RRm<"or", 0x45, or, I64, i64, simm7Op64, uimm6Op64>;
+  let isCodeGenOnly = 1 in {
+    defm AND32 : RRm<"and", 0x44, and, I32, i32, simm7Op32, uimm6Op32>;
+    defm OR32 : RRm<"or", 0x45, or, I32, i32, simm7Op32, uimm6Op32>;
+  }
 }
 
+
+// 5.3.2.4 Shift Instructions
+
+let cx = 0 in
+defm SRAX : RRIm<"sra.l", 0x77, sra, I64, i64, simm7Op32, uimm6Op64>;
+let cx = 0 in
+defm SRA : RRIm<"sra.w.sx", 0x76, sra, I32, i32, simm7Op32, uimm6Op32>;
+
+let cx = 0 in
+defm SLL : RRIm<"sll", 0x65, shl, I64, i64, simm7Op32, uimm6Op64>;
+let cx = 0 in
+defm SLA : RRIm<"sla.w.sx", 0x66, shl, I32, i32, simm7Op32, uimm6Op32>;
+
+
 // Load and Store instructions
 // As 1st step, only uses sz and imm32 to represent $addr
 let mayLoad = 1, hasSideEffects = 0 in {
@@ -307,8 +391,10 @@ def MONC : RR<
 //===----------------------------------------------------------------------===//
 
 // Small immediates.
+def : Pat<(i32 simm7:$val), (OR32im1 imm:$val, 0)>;
 def : Pat<(i64 simm7:$val), (ORim1 imm:$val, 0)>;
 // Medium immediates.
+def : Pat<(i32 simm32:$val), (LEA32zzi imm:$val)>;
 def : Pat<(i64 simm32:$val), (LEAzzi imm:$val)>;
 def : Pat<(i64 uimm32:$val), (ANDrm0 (LEAzzi imm:$val), 32)>;
 // Arbitrary immediates.
@@ -320,6 +406,66 @@ def : Pat<(i64 imm:$val),
           (LEASLrzi (ANDrm0 (LEAzzi (LO32 imm:$val)), 32),
                     (HI32 imm:$val))>;
 
+// floating point
+def : Pat<(f32 fpimm:$val),
+          (COPY_TO_REGCLASS (LEASLzzi (LOFP32 $val)), F32)>;
+def : Pat<(f64 fplozero:$val),
+          (LEASLzzi (HIFP32 $val))>;
+def : Pat<(f64 fplomsbzero:$val),
+          (LEASLrzi (LEAzzi (LOFP32 $val)), (HIFP32 $val))>;
+def : Pat<(f64 fpimm:$val),
+          (LEASLrzi (ANDrm0 (LEAzzi (LOFP32 $val)), 32),
+                    (HIFP32 $val))>;
+
+// The same integer registers are used for i32 and i64 values.
+// When registers hold i32 values, the high bits are unused. 
+
+// TODO Use standard expansion for shift-based lowering of sext_inreg
+
+// Cast to i1
+def : Pat<(sext_inreg I32:$src, i1),
+          (SRAri (SLAri $src, 31), 31)>;
+def : Pat<(sext_inreg I64:$src, i1),
+          (SRAXri (SLLri $src, 63), 63)>;
+
+// Cast to i8
+def : Pat<(sext_inreg I32:$src, i8),
+          (SRAri (SLAri $src, 24), 24)>;
+def : Pat<(sext_inreg I64:$src, i8),
+          (SRAXri (SLLri $src, 56), 56)>;
+def : Pat<(sext_inreg (i32 (trunc i64:$src)), i8),
+          (EXTRACT_SUBREG (SRAXri (SLLri $src, 56), 56), sub_i32)>;
+def : Pat<(and (trunc i64:$src), 0xff),
+          (AND32rm0 (EXTRACT_SUBREG $src, sub_i32), 56)>;
+
+// Cast to i16
+def : Pat<(sext_inreg I32:$src, i16),
+          (SRAri (SLAri $src, 16), 16)>;
+def : Pat<(sext_inreg I64:$src, i16),
+          (SRAXri (SLLri $src, 48), 48)>;
+def : Pat<(sext_inreg (i32 (trunc i64:$src)), i16),
+          (EXTRACT_SUBREG (SRAXri (SLLri $src, 48), 48), sub_i32)>;
+def : Pat<(and (trunc i64:$src), 0xffff),
+          (AND32rm0 (EXTRACT_SUBREG $src, sub_i32), 48)>;
+
+// Cast to i32
+def : Pat<(i32 (trunc i64:$src)),
+          (ADSrm1 (EXTRACT_SUBREG $src, sub_i32), 0)>;
+
+// Cast to i64
+def : Pat<(sext_inreg I64:$src, i32),
+          (INSERT_SUBREG (i64 (IMPLICIT_DEF)),
+            (ADSrm1 (EXTRACT_SUBREG $src, sub_i32), 0), sub_i32)>;
+def : Pat<(i64 (sext i32:$sy)),
+          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (ADSrm1 $sy, 0), sub_i32)>;
+def : Pat<(i64 (zext i32:$sy)),
+          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (ADSUrm1 $sy, 0), sub_i32)>;
+
+def : Pat<(i64 (anyext i32:$sy)),
+          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), $sy, sub_i32)>;
+
+
+
 //===----------------------------------------------------------------------===//
 // Pseudo Instructions
 //===----------------------------------------------------------------------===//
@@ -341,3 +487,12 @@ let  hasSideEffects = 0 in
 def EXTEND_STACK_GUARD : Pseudo<(outs), (ins),
                                 "# EXTEND STACK GUARD",
                                 []>;
+
+// Several special pattern matches to optimize code
+
+def : Pat<(i32 (and i32:$lhs, 0xff)),
+          (AND32rm0 $lhs, 56)>;
+def : Pat<(i32 (and i32:$lhs, 0xffff)),
+          (AND32rm0 $lhs, 48)>;
+def : Pat<(i32 (and i32:$lhs, 0xffffffff)),
+          (AND32rm0 $lhs, 32)>;

diff --git a/llvm/lib/Target/VE/VERegisterInfo.cpp b/llvm/lib/Target/VE/VERegisterInfo.cpp
index e1ff614abc20..74ccc70d2ed8 100644
--- a/llvm/lib/Target/VE/VERegisterInfo.cpp
+++ b/llvm/lib/Target/VE/VERegisterInfo.cpp
@@ -48,21 +48,29 @@ const uint32_t *VERegisterInfo::getNoPreservedMask() const {
 
 BitVector VERegisterInfo::getReservedRegs(const MachineFunction &MF) const {
   BitVector Reserved(getNumRegs());
-  Reserved.set(VE::SX8);  // stack limit
-  Reserved.set(VE::SX9);  // frame pointer
-  Reserved.set(VE::SX10); // link register (return address)
-  Reserved.set(VE::SX11); // stack pointer
 
-  Reserved.set(VE::SX12); // outer register
-  Reserved.set(VE::SX13); // id register for dynamic linker
-
-  Reserved.set(VE::SX14); // thread pointer
-  Reserved.set(VE::SX15); // global offset table register
-  Reserved.set(VE::SX16); // procedure linkage table register
-  Reserved.set(VE::SX17); // linkage-area register
-
-  // sx18-sx33 are callee-saved registers
-  // sx34-sx63 are temporary registers
+  const Register ReservedRegs[] = {
+      VE::SX8,  // Stack limit
+      VE::SX9,  // Frame pointer
+      VE::SX10, // Link register (return address)
+      VE::SX11, // Stack pointer
+
+      // FIXME: maybe not need to be reserved
+      VE::SX12, // Outer register
+      VE::SX13, // Id register for dynamic linker
+
+      VE::SX14, // Thread pointer
+      VE::SX15, // Global offset table register
+      VE::SX16, // Procedure linkage table register
+      VE::SX17, // Linkage-area register
+                // sx18-sx33 are callee-saved registers
+                // sx34-sx63 are temporary registers
+  };
+
+  for (auto R : ReservedRegs)
+    for (MCRegAliasIterator ItAlias(R, this, true); ItAlias.isValid();
+         ++ItAlias)
+      Reserved.set(*ItAlias);
 
   return Reserved;
 }

diff --git a/llvm/lib/Target/VE/VERegisterInfo.td b/llvm/lib/Target/VE/VERegisterInfo.td
index ef5b9c09705a..e035049076e9 100644
--- a/llvm/lib/Target/VE/VERegisterInfo.td
+++ b/llvm/lib/Target/VE/VERegisterInfo.td
@@ -16,6 +16,13 @@ class VEReg<bits<7> Enc, string n> : Register<n> {
   let Namespace = "VE";
 }
 
+let Namespace = "VE" in {
+  def sub_i8      : SubRegIndex<8, 56>;         // Low 8 bit (56..63)
+  def sub_i16     : SubRegIndex<16, 48>;        // Low 16 bit (48..63)
+  def sub_i32     : SubRegIndex<32, 32>;        // Low 32 bit (32..63)
+  def sub_f32     : SubRegIndex<32>;            // High 32 bit (0..31)
+}
+
 // Registers are identified with 7-bit ID numbers.
 // R - 64-bit integer or floating-point registers
 class R<bits<7> Enc, string n, list<Register> subregs = [],
@@ -24,14 +31,53 @@ class R<bits<7> Enc, string n, list<Register> subregs = [],
   let Aliases = aliases;
 }
 
+// Generic integer registers - 8 bits wide
+foreach I = 0-63 in
+  def SB#I : R<I, "S"#I>, DwarfRegNum<[I]>;
+
+// Generic integer registers - 16 bits wide
+let SubRegIndices = [sub_i8] in
+foreach I = 0-63 in
+  def SH#I : R<I, "S"#I, [!cast<R>("SB"#I)]>, DwarfRegNum<[I]>;
+
+// Generic integer registers - 32 bits wide
+let SubRegIndices = [sub_i16] in
+foreach I = 0-63 in
+  def SW#I : R<I, "S"#I, [!cast<R>("SH"#I)]>, DwarfRegNum<[I]>;
+
+// Generic floating point registers - 32 bits wide
+//   NOTE: Mark SF#I as alias of SW#I temporary to avoid register allocation
+//         problem.
+foreach I = 0-63 in
+  def SF#I : R<I, "S"#I, [], [!cast<R>("SW"#I)]>, DwarfRegNum<[I]>;
+
 // Generic integer registers - 64 bits wide
+let SubRegIndices = [sub_i32, sub_f32], CoveredBySubRegs = 1 in
 foreach I = 0-63 in
-  def SX#I : R<I, "S"#I, []>,
+  def SX#I : R<I, "S"#I, [!cast<R>("SW"#I), !cast<R>("SF"#I)]>,
              DwarfRegNum<[I]>;
 
 // Register classes.
 //
 // The register order is defined in terms of the preferred
 // allocation order.
-def I64 : RegisterClass<"VE", [i64], 64,
-                        (sequence "SX%u", 0, 63)>;
+def I8  : RegisterClass<"VE", [i8], 8,
+                        (add (sequence "SB%u", 0, 7),
+                             (sequence "SB%u", 34, 63),
+                             (sequence "SB%u", 8, 33))>;
+def I16 : RegisterClass<"VE", [i16], 16,
+                        (add (sequence "SH%u", 0, 7),
+                             (sequence "SH%u", 34, 63),
+                             (sequence "SH%u", 8, 33))>;
+def I32 : RegisterClass<"VE", [i32], 32,
+                        (add (sequence "SW%u", 0, 7),
+                             (sequence "SW%u", 34, 63),
+                             (sequence "SW%u", 8, 33))>;
+def I64 : RegisterClass<"VE", [i64, f64], 64,
+                        (add (sequence "SX%u", 0, 7),
+                             (sequence "SX%u", 34, 63),
+                             (sequence "SX%u", 8, 33))>;
+def F32 : RegisterClass<"VE", [f32], 32,
+                        (add (sequence "SF%u", 0, 7),
+                             (sequence "SF%u", 34, 63),
+                             (sequence "SF%u", 8, 33))>;

diff --git a/llvm/test/CodeGen/VE/cast.ll b/llvm/test/CodeGen/VE/cast.ll
new file mode 100644
index 000000000000..d852ab574a8e
--- /dev/null
+++ b/llvm/test/CodeGen/VE/cast.ll
@@ -0,0 +1,972 @@
+; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
+
+define i32 @i() {
+; CHECK-LABEL: i:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -2147483648
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i32 -2147483648
+}
+
+define i32 @ui() {
+; CHECK-LABEL: ui:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -2147483648
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i32 -2147483648
+}
+
+define i64 @ll() {
+; CHECK-LABEL: ll:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -2147483648
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 -2147483648
+}
+
+define i64 @ull() {
+; CHECK-LABEL: ull:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -2147483648
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 2147483648
+}
+
+define double @d2d(double returned %0) {
+; CHECK-LABEL: d2d:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret double %0
+}
+
+define float @f2f(float returned %0) {
+; CHECK-LABEL: f2f:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret float %0
+}
+define signext i8 @ll2c(i64 %0) {
+; CHECK-LABEL: ll2c:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sll %s0, %s0, 56
+; CHECK-NEXT:    sra.l %s0, %s0, 56
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 killed $sx0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i64 %0 to i8
+  ret i8 %2
+}
+
+define zeroext i8 @ll2uc(i64 %0) {
+; CHECK-LABEL: ll2uc:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    and %s0, %s0, (56)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i64 %0 to i8
+  ret i8 %2
+}
+
+define signext i16 @ll2s(i64 %0) {
+; CHECK-LABEL: ll2s:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sll %s0, %s0, 48
+; CHECK-NEXT:    sra.l %s0, %s0, 48
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 killed $sx0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i64 %0 to i16
+  ret i16 %2
+}
+
+define zeroext i16 @ll2us(i64 %0) {
+; CHECK-LABEL: ll2us:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    and %s0, %s0, (48)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i64 %0 to i16
+  ret i16 %2
+}
+
+define i32 @ll2i(i64 %0) {
+; CHECK-LABEL: ll2i:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i64 %0 to i32
+  ret i32 %2
+}
+
+define i32 @ll2ui(i64 %0) {
+; CHECK-LABEL: ll2ui:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i64 %0 to i32
+  ret i32 %2
+}
+
+define i64 @ll2ll(i64 returned %0) {
+; CHECK-LABEL: ll2ll:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 %0
+}
+
+define i64 @ll2ull(i64 returned %0) {
+; CHECK-LABEL: ll2ull:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 %0
+}
+
+define signext i8 @ull2c(i64 %0) {
+; CHECK-LABEL: ull2c:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sll %s0, %s0, 56
+; CHECK-NEXT:    sra.l %s0, %s0, 56
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 killed $sx0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i64 %0 to i8
+  ret i8 %2
+}
+
+define zeroext i8 @ull2uc(i64 %0) {
+; CHECK-LABEL: ull2uc:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    and %s0, %s0, (56)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i64 %0 to i8
+  ret i8 %2
+}
+
+define signext i16 @ull2s(i64 %0) {
+; CHECK-LABEL: ull2s:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sll %s0, %s0, 48
+; CHECK-NEXT:    sra.l %s0, %s0, 48
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 killed $sx0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i64 %0 to i16
+  ret i16 %2
+}
+
+define zeroext i16 @ull2us(i64 %0) {
+; CHECK-LABEL: ull2us:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    and %s0, %s0, (48)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i64 %0 to i16
+  ret i16 %2
+}
+
+define i32 @ull2i(i64 %0) {
+; CHECK-LABEL: ull2i:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i64 %0 to i32
+  ret i32 %2
+}
+
+define i32 @ull2ui(i64 %0) {
+; CHECK-LABEL: ull2ui:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i64 %0 to i32
+  ret i32 %2
+}
+
+define i64 @ull2ll(i64 returned %0) {
+; CHECK-LABEL: ull2ll:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 %0
+}
+
+define i64 @ull2ull(i64 returned %0) {
+; CHECK-LABEL: ull2ull:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 %0
+}
+
+define signext i8 @i2c(i32 %0) {
+; CHECK-LABEL: i2c:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 24
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 24
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i32 %0 to i8
+  ret i8 %2
+}
+
+define zeroext i8 @i2uc(i32 %0) {
+; CHECK-LABEL: i2uc:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    and %s0, %s0, (56)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i32 %0 to i8
+  ret i8 %2
+}
+
+define signext i16 @i2s(i32 %0) {
+; CHECK-LABEL: i2s:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 16
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 16
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i32 %0 to i16
+  ret i16 %2
+}
+
+define zeroext i16 @i2us(i32 %0) {
+; CHECK-LABEL: i2us:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    and %s0, %s0, (48)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i32 %0 to i16
+  ret i16 %2
+}
+
+define i32 @i2i(i32 returned %0) {
+; CHECK-LABEL: i2i:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i32 %0
+}
+
+define i32 @i2ui(i32 returned %0) {
+; CHECK-LABEL: i2ui:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i32 %0
+}
+
+define i64 @i2ll(i32 %0) {
+; CHECK-LABEL: i2ll:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i32 %0 to i64
+  ret i64 %2
+}
+
+define i64 @i2ull(i32 %0) {
+; CHECK-LABEL: i2ull:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i32 %0 to i64
+  ret i64 %2
+}
+
+define signext i8 @ui2c(i32 %0) {
+; CHECK-LABEL: ui2c:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 24
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 24
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i32 %0 to i8
+  ret i8 %2
+}
+
+define zeroext i8 @ui2uc(i32 %0) {
+; CHECK-LABEL: ui2uc:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    and %s0, %s0, (56)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i32 %0 to i8
+  ret i8 %2
+}
+
+define signext i16 @ui2s(i32 %0) {
+; CHECK-LABEL: ui2s:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 16
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 16
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i32 %0 to i16
+  ret i16 %2
+}
+
+define zeroext i16 @ui2us(i32 %0) {
+; CHECK-LABEL: ui2us:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    and %s0, %s0, (48)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i32 %0 to i16
+  ret i16 %2
+}
+
+define i32 @ui2i(i32 returned %0) {
+; CHECK-LABEL: ui2i:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i32 %0
+}
+
+define i32 @ui2ui(i32 returned %0) {
+; CHECK-LABEL: ui2ui:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i32 %0
+}
+
+define i64 @ui2ll(i32 %0) {
+; CHECK-LABEL: ui2ll:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.zx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i32 %0 to i64
+  ret i64 %2
+}
+
+define i64 @ui2ull(i32 %0) {
+; CHECK-LABEL: ui2ull:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.zx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i32 %0 to i64
+  ret i64 %2
+}
+
+define signext i8 @s2c(i16 signext %0) {
+; CHECK-LABEL: s2c:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 24
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 24
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i16 %0 to i8
+  ret i8 %2
+}
+
+define zeroext i8 @s2uc(i16 signext %0) {
+; CHECK-LABEL: s2uc:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    and %s0, %s0, (56)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i16 %0 to i8
+  ret i8 %2
+}
+
+define signext i16 @s2s(i16 returned signext %0) {
+; CHECK-LABEL: s2s:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i16 %0
+}
+
+define zeroext i16 @s2us(i16 returned signext %0) {
+; CHECK-LABEL: s2us:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    and %s0, %s0, (48)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i16 %0
+}
+
+define i32 @s2i(i16 signext %0) {
+; CHECK-LABEL: s2i:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i16 %0 to i32
+  ret i32 %2
+}
+
+define i32 @s2ui(i16 signext %0) {
+; CHECK-LABEL: s2ui:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i16 %0 to i32
+  ret i32 %2
+}
+
+define i64 @s2ll(i16 signext %0) {
+; CHECK-LABEL: s2ll:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i16 %0 to i64
+  ret i64 %2
+}
+
+define i64 @s2ull(i16 signext %0) {
+; CHECK-LABEL: s2ull:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i16 %0 to i64
+  ret i64 %2
+}
+
+define signext i8 @us2c(i16 zeroext %0) {
+; CHECK-LABEL: us2c:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 24
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 24
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i16 %0 to i8
+  ret i8 %2
+}
+
+define zeroext i8 @us2uc(i16 zeroext %0) {
+; CHECK-LABEL: us2uc:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    and %s0, %s0, (56)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i16 %0 to i8
+  ret i8 %2
+}
+
+define signext i16 @us2s(i16 returned zeroext %0) {
+; CHECK-LABEL: us2s:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 16
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 16
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i16 %0
+}
+
+define zeroext i16 @us2us(i16 returned zeroext %0) {
+; CHECK-LABEL: us2us:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i16 %0
+}
+
+define i32 @us2i(i16 zeroext %0) {
+; CHECK-LABEL: us2i:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i16 %0 to i32
+  ret i32 %2
+}
+
+define i32 @us2ui(i16 zeroext %0) {
+; CHECK-LABEL: us2ui:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i16 %0 to i32
+  ret i32 %2
+}
+
+define i64 @us2ll(i16 zeroext %0) {
+; CHECK-LABEL: us2ll:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.zx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i16 %0 to i64
+  ret i64 %2
+}
+
+define i64 @us2ull(i16 zeroext %0) {
+; CHECK-LABEL: us2ull:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.zx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i16 %0 to i64
+  ret i64 %2
+}
+
+define signext i8 @c2c(i8 returned signext %0) {
+; CHECK-LABEL: c2c:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i8 %0
+}
+
+define zeroext i8 @c2uc(i8 returned signext %0) {
+; CHECK-LABEL: c2uc:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    and %s0, %s0, (56)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i8 %0
+}
+
+define signext i16 @c2s(i8 signext %0) {
+; CHECK-LABEL: c2s:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i8 %0 to i16
+  ret i16 %2
+}
+
+define zeroext i16 @c2us(i8 signext %0) {
+; CHECK-LABEL: c2us:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    and %s0, %s0, (48)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i8 %0 to i16
+  ret i16 %2
+}
+
+define i32 @c2i(i8 signext %0) {
+; CHECK-LABEL: c2i:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i8 %0 to i32
+  ret i32 %2
+}
+
+define i32 @c2ui(i8 signext %0) {
+; CHECK-LABEL: c2ui:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i8 %0 to i32
+  ret i32 %2
+}
+
+define i64 @c2ll(i8 signext %0) {
+; CHECK-LABEL: c2ll:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i8 %0 to i64
+  ret i64 %2
+}
+
+define i64 @c2ull(i8 signext %0) {
+; CHECK-LABEL: c2ull:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i8 %0 to i64
+  ret i64 %2
+}
+
+define signext i8 @uc2c(i8 returned zeroext %0) {
+; CHECK-LABEL: uc2c:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sla.w.sx %s0, %s0, 24
+; CHECK-NEXT:    sra.w.sx %s0, %s0, 24
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i8 %0
+}
+
+define zeroext i8 @uc2uc(i8 returned zeroext %0) {
+; CHECK-LABEL: uc2uc:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i8 %0
+}
+
+define signext i16 @uc2s(i8 zeroext %0) {
+; CHECK-LABEL: uc2s:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i8 %0 to i16
+  ret i16 %2
+}
+
+define zeroext i16 @uc2us(i8 zeroext %0) {
+; CHECK-LABEL: uc2us:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i8 %0 to i16
+  ret i16 %2
+}
+
+define i32 @uc2i(i8 zeroext %0) {
+; CHECK-LABEL: uc2i:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i8 %0 to i32
+  ret i32 %2
+}
+
+define i32 @uc2ui(i8 zeroext %0) {
+; CHECK-LABEL: uc2ui:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i8 %0 to i32
+  ret i32 %2
+}
+
+define i64 @uc2ll(i8 zeroext %0) {
+; CHECK-LABEL: uc2ll:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.zx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i8 %0 to i64
+  ret i64 %2
+}
+
+define i64 @uc2ull(i8 zeroext %0) {
+; CHECK-LABEL: uc2ull:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.zx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i8 %0 to i64
+  ret i64 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @i128() {
+; CHECK-LABEL: i128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -2147483648
+; CHECK-NEXT:    or %s1, -1, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i128 -2147483648
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @ui128() {
+; CHECK-LABEL: ui128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -2147483648
+; CHECK-NEXT:    or %s1, -1, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i128 -2147483648
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i8 @i1282c(i128 %0) {
+; CHECK-LABEL: i1282c:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sll %s0, %s0, 56
+; CHECK-NEXT:    sra.l %s0, %s0, 56
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 killed $sx0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i128 %0 to i8
+  ret i8 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i8 @ui1282c(i128 %0) {
+; CHECK-LABEL: ui1282c:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sll %s0, %s0, 56
+; CHECK-NEXT:    sra.l %s0, %s0, 56
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 killed $sx0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i128 %0 to i8
+  ret i8 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define zeroext i8 @i1282uc(i128 %0) {
+; CHECK-LABEL: i1282uc:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    and %s0, %s0, (56)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i128 %0 to i8
+  ret i8 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define zeroext i8 @ui1282uc(i128 %0) {
+; CHECK-LABEL: ui1282uc:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    and %s0, %s0, (56)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i128 %0 to i8
+  ret i8 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i16 @i1282s(i128 %0) {
+; CHECK-LABEL: i1282s:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sll %s0, %s0, 48
+; CHECK-NEXT:    sra.l %s0, %s0, 48
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 killed $sx0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i128 %0 to i16
+  ret i16 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define signext i16 @ui1282s(i128 %0) {
+; CHECK-LABEL: ui1282s:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sll %s0, %s0, 48
+; CHECK-NEXT:    sra.l %s0, %s0, 48
+; CHECK-NEXT:    # kill: def $sw0 killed $sw0 killed $sx0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i128 %0 to i16
+  ret i16 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define zeroext i16 @i1282us(i128 %0) {
+; CHECK-LABEL: i1282us:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    and %s0, %s0, (48)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i128 %0 to i16
+  ret i16 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define zeroext i16 @ui1282us(i128 %0) {
+; CHECK-LABEL: ui1282us:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    and %s0, %s0, (48)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i128 %0 to i16
+  ret i16 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i32 @i1282i(i128 %0) {
+; CHECK-LABEL: i1282i:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i128 %0 to i32
+  ret i32 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i32 @ui1282i(i128 %0) {
+; CHECK-LABEL: ui1282i:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i128 %0 to i32
+  ret i32 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i32 @i1282ui(i128 %0) {
+; CHECK-LABEL: i1282ui:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i128 %0 to i32
+  ret i32 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i32 @ui1282ui(i128 %0) {
+; CHECK-LABEL: ui1282ui:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i128 %0 to i32
+  ret i32 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @i1282ll(i128 %0) {
+; CHECK-LABEL: i1282ll:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i128 %0 to i64
+  ret i64 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @ui1282ll(i128 %0) {
+; CHECK-LABEL: ui1282ll:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i128 %0 to i64
+  ret i64 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @i1282ull(i128 %0) {
+; CHECK-LABEL: i1282ull:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i128 %0 to i64
+  ret i64 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i64 @ui1282ull(i128 %0) {
+; CHECK-LABEL: ui1282ull:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = trunc i128 %0 to i64
+  ret i64 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @i1282ui128(i128 returned %0) {
+; CHECK-LABEL: i1282ui128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i128 %0
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @ui1282i128(i128 returned %0) {
+; CHECK-LABEL: ui1282i128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i128 %0
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @ll2i128(i64 %0) {
+; CHECK-LABEL: ll2i128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sra.l %s1, %s0, 63
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i64 %0 to i128
+  ret i128 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @ll2ui128(i64 %0) {
+; CHECK-LABEL: ll2ui128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    sra.l %s1, %s0, 63
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i64 %0 to i128
+  ret i128 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @ull2i128(i64 %0) {
+; CHECK-LABEL: ull2i128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s1, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i64 %0 to i128
+  ret i128 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @ull2ui128(i64 %0) {
+; CHECK-LABEL: ull2ui128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s1, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i64 %0 to i128
+  ret i128 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @i2i128(i32 %0) {
+; CHECK-LABEL: i2i128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    sra.l %s1, %s0, 63
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i32 %0 to i128
+  ret i128 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @i2ui128(i32 %0) {
+; CHECK-LABEL: i2ui128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    sra.l %s1, %s0, 63
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i32 %0 to i128
+  ret i128 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @ui2i128(i32 %0) {
+; CHECK-LABEL: ui2i128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.zx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s1, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i32 %0 to i128
+  ret i128 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @ui2ui128(i32 %0) {
+; CHECK-LABEL: ui2ui128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.zx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s1, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i32 %0 to i128
+  ret i128 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @s2i128(i16 signext %0) {
+; CHECK-LABEL: s2i128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    sra.l %s1, %s0, 63
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i16 %0 to i128
+  ret i128 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @s2ui128(i16 signext %0) {
+; CHECK-LABEL: s2ui128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    sra.l %s1, %s0, 63
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i16 %0 to i128
+  ret i128 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @us2i128(i16 zeroext %0) {
+; CHECK-LABEL: us2i128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.zx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s1, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i16 %0 to i128
+  ret i128 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @us2ui128(i16 zeroext %0) {
+; CHECK-LABEL: us2ui128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.zx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s1, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i16 %0 to i128
+  ret i128 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @c2i128(i8 signext %0) {
+; CHECK-LABEL: c2i128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    sra.l %s1, %s0, 63
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i8 %0 to i128
+  ret i128 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @char2ui128(i8 signext %0) {
+; CHECK-LABEL: char2ui128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.sx %s0, %s0, (0)1
+; CHECK-NEXT:    sra.l %s1, %s0, 63
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = sext i8 %0 to i128
+  ret i128 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @uc2i128(i8 zeroext %0) {
+; CHECK-LABEL: uc2i128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.zx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s1, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i8 %0 to i128
+  ret i128 %2
+}
+
+; Function Attrs: norecurse nounwind readnone
+define i128 @uc2ui128(i8 zeroext %0) {
+; CHECK-LABEL: uc2ui128:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    adds.w.zx %s0, %s0, (0)1
+; CHECK-NEXT:    or %s1, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  %2 = zext i8 %0 to i128
+  ret i128 %2
+}

diff  --git a/llvm/test/CodeGen/VE/constants.ll b/llvm/test/CodeGen/VE/constants.ll
new file mode 100644
index 000000000000..3c899156af1d
--- /dev/null
+++ b/llvm/test/CodeGen/VE/constants.ll
@@ -0,0 +1,355 @@
+; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
+
+define i8 @p0i8() {
+; CHECK-LABEL: p0i8:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s0, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i8 0
+}
+
+define signext i8 @p0si8() {
+; CHECK-LABEL: p0si8:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s0, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i8 0
+}
+
+define zeroext i8 @p0zi8() {
+; CHECK-LABEL: p0zi8:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s0, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i8 0
+}
+
+define i8 @p128i8() {
+; CHECK-LABEL: p128i8:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i8 128
+}
+
+define signext i8 @p128si8() {
+; CHECK-LABEL: p128si8:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -128
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i8 128
+}
+
+define zeroext i8 @p128zi8() {
+; CHECK-LABEL: p128zi8:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i8 128
+}
+
+define i8 @p256i8() {
+; CHECK-LABEL: p256i8:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s0, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i8 256
+}
+
+define signext i8 @p256si8() {
+; CHECK-LABEL: p256si8:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s0, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i8 256
+}
+
+define zeroext i8 @p256zi8() {
+; CHECK-LABEL: p256zi8:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s0, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i8 256
+}
+
+define i16 @p0i16() {
+; CHECK-LABEL: p0i16:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s0, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i16 0
+}
+
+define signext i16 @p0si16() {
+; CHECK-LABEL: p0si16:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s0, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i16 0
+}
+
+define zeroext i16 @p0zi16() {
+; CHECK-LABEL: p0zi16:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s0, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i16 0
+}
+
+define i32 @p0i32() {
+; CHECK-LABEL: p0i32:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s0, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i32 0
+}
+
+define signext i32 @p0si32() {
+; CHECK-LABEL: p0si32:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s0, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i32 0
+}
+
+define zeroext i32 @p0zi32() {
+; CHECK-LABEL: p0zi32:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s0, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i32 0
+}
+
+define i32 @p128i32() {
+; CHECK-LABEL: p128i32:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i32 128
+}
+
+define signext i32 @p128si32() {
+; CHECK-LABEL: p128si32:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i32 128
+}
+
+define zeroext i32 @p128zi32() {
+; CHECK-LABEL: p128zi32:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i32 128
+}
+
+define i64 @p0i64() {
+; CHECK-LABEL: p0i64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s0, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 0
+}
+
+define signext i64 @p0si64() {
+; CHECK-LABEL: p0si64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s0, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 0
+}
+
+define zeroext i64 @p0zi64() {
+; CHECK-LABEL: p0zi64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    or %s0, 0, (0)1
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 0
+}
+
+define i64 @p128i64() {
+; CHECK-LABEL: p128i64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 128
+}
+
+define signext i64 @p128si64() {
+; CHECK-LABEL: p128si64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 128
+}
+
+define zeroext i64 @p128zi64() {
+; CHECK-LABEL: p128zi64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 128
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 128
+}
+
+define i64 @p2264924160i64() {
+; CHECK-LABEL: p2264924160i64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -2030043136
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 2264924160
+}
+
+define signext i64 @p2264924160si64() {
+; CHECK-LABEL: p2264924160si64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -2030043136
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 2264924160
+}
+
+define zeroext i64 @p2264924160zi64() {
+; CHECK-LABEL: p2264924160zi64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -2030043136
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 2264924160
+}
+
+define i64 @p2147483647i64() {
+; CHECK-LABEL: p2147483647i64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 2147483647
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 2147483647
+}
+
+define signext i64 @p2147483647si64() {
+; CHECK-LABEL: p2147483647si64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 2147483647
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 2147483647
+}
+
+define zeroext i64 @p2147483647zi64() {
+; CHECK-LABEL: p2147483647zi64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 2147483647
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 2147483647
+}
+
+define i64 @p15032385535i64() {
+; CHECK-LABEL: p15032385535i64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 2147483647
+; CHECK-NEXT:    lea.sl %s0, 3(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 15032385535
+}
+
+define signext i64 @p15032385535si64() {
+; CHECK-LABEL: p15032385535si64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 2147483647
+; CHECK-NEXT:    lea.sl %s0, 3(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 15032385535
+}
+
+define zeroext i64 @p15032385535zi64() {
+; CHECK-LABEL: p15032385535zi64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 2147483647
+; CHECK-NEXT:    lea.sl %s0, 3(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 15032385535
+}
+
+define i64 @p15032385536i64() {
+; CHECK-LABEL: p15032385536i64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -2147483648
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s0, 3(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 15032385536
+}
+
+define signext i64 @p15032385536si64() {
+; CHECK-LABEL: p15032385536si64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -2147483648
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s0, 3(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 15032385536
+}
+
+define zeroext i64 @p15032385536zi64() {
+; CHECK-LABEL: p15032385536zi64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -2147483648
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s0, 3(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret i64 15032385536
+}
+
+define float @m5f32() {
+; CHECK-LABEL: m5f32:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea.sl %s0, -1063256064
+; CHECK-NEXT:    or %s0, 0, %s0
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret float -5.000000e+00
+}
+
+define double @m5f64() {
+; CHECK-LABEL: m5f64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea.sl %s0, -1072431104
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret double -5.000000e+00
+}
+
+define float @p2p3f32() {
+; CHECK-LABEL: p2p3f32:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea.sl %s0, 1075000115
+; CHECK-NEXT:    or %s0, 0, %s0
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret float 0x4002666660000000 ; 2.3
+}
+
+define double @p2p3f64() {
+; CHECK-LABEL: p2p3f64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, 1717986918
+; CHECK-NEXT:    lea.sl %s0, 1073899110(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret double 2.3
+}
+
+define float @p128p3f32() {
+; CHECK-LABEL: p128p3f32:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea.sl %s0, 1124093133
+; CHECK-NEXT:    or %s0, 0, %s0
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret float 0x40600999A0000000 ; 128.3
+}
+
+define double @p128p3f64() {
+; CHECK-LABEL: p128p3f64:
+; CHECK:       .LBB{{[0-9]+}}_2:
+; CHECK-NEXT:    lea %s0, -1717986918
+; CHECK-NEXT:    and %s0, %s0, (32)0
+; CHECK-NEXT:    lea.sl %s0, 1080035737(%s0)
+; CHECK-NEXT:    or %s11, 0, %s9
+  ret double 128.3
+}

diff  --git a/llvm/test/CodeGen/VE/constants_i64.ll b/llvm/test/CodeGen/VE/constants_i64.ll
deleted file mode 100644
index 432045d0d49d..000000000000
--- a/llvm/test/CodeGen/VE/constants_i64.ll
+++ /dev/null
@@ -1,157 +0,0 @@
-; RUN: llc < %s -mtriple=ve-unknown-unknown | FileCheck %s
-
-define i64 @p0i64() {
-; CHECK-LABEL: p0i64:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:  or %s0, 0, (0)1
-; CHECK-NEXT:  or %s11, 0, %s9
-  ret i64 0
-}
-
-define signext i64 @p0si64() {
-; CHECK-LABEL: p0si64:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:  or %s0, 0, (0)1
-; CHECK-NEXT:  or %s11, 0, %s9
-  ret i64 0
-}
-
-define zeroext i64 @p0zi64() {
-; CHECK-LABEL: p0zi64:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:  or %s0, 0, (0)1
-; CHECK-NEXT:  or %s11, 0, %s9
-  ret i64 0
-}
-
-define i64 @p128i64() {
-; CHECK-LABEL: p128i64:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:  lea %s0, 128
-; CHECK-NEXT:  or %s11, 0, %s9
-  ret i64 128
-}
-
-define signext i64 @p128si64() {
-; CHECK-LABEL: p128si64:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:  lea %s0, 128
-; CHECK-NEXT:  or %s11, 0, %s9
-  ret i64 128
-}
-
-define zeroext i64 @p128zi64() {
-; CHECK-LABEL: p128zi64:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:  lea %s0, 128
-; CHECK-NEXT:  or %s11, 0, %s9
-  ret i64 128
-}
-
-define i64 @p2264924160i64() {
-; CHECK-LABEL: p2264924160i64:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:  lea %s0, -2030043136
-; CHECK-NEXT:  and %s0, %s0, (32)0
-; CHECK-NEXT:  or %s11, 0, %s9
-  ret i64 2264924160
-}
-
-define signext i64 @p2264924160si64() {
-; CHECK-LABEL: p2264924160si64:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:  lea %s0, -2030043136
-; CHECK-NEXT:  and %s0, %s0, (32)0
-; CHECK-NEXT:  or %s11, 0, %s9
-  ret i64 2264924160
-}
-
-define zeroext i64 @p2264924160zi64() {
-; CHECK-LABEL: p2264924160zi64:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:  lea %s0, -2030043136
-; CHECK-NEXT:  and %s0, %s0, (32)0
-; CHECK-NEXT:  or %s11, 0, %s9
-  ret i64 2264924160
-}
-
-define i64 @p2147483647i64() {
-; CHECK-LABEL: p2147483647i64:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:  lea %s0, 2147483647
-; CHECK-NEXT:  or %s11, 0, %s9
-  ret i64 2147483647
-}
-
-define signext i64 @p2147483647si64() {
-; CHECK-LABEL: p2147483647si64:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:  lea %s0, 2147483647
-; CHECK-NEXT:  or %s11, 0, %s9
-  ret i64 2147483647
-}
-
-define zeroext i64 @p2147483647zi64() {
-; CHECK-LABEL: p2147483647zi64:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:  lea %s0, 2147483647
-; CHECK-NEXT:  or %s11, 0, %s9
-  ret i64 2147483647
-}
-
-define i64 @p15032385535i64() {
-; CHECK-LABEL: p15032385535i64:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:  lea %s0, 2147483647
-; CHECK-NEXT:  lea.sl %s0, 3(%s0)
-; CHECK-NEXT:  or %s11, 0, %s9
-  ret i64 15032385535
-}
-
-define signext i64 @p15032385535si64() {
-; CHECK-LABEL: p15032385535si64:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:  lea %s0, 2147483647
-; CHECK-NEXT:  lea.sl %s0, 3(%s0)
-; CHECK-NEXT:  or %s11, 0, %s9
-  ret i64 15032385535
-}
-
-define zeroext i64 @p15032385535zi64() {
-; CHECK-LABEL: p15032385535zi64:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:  lea %s0, 2147483647
-; CHECK-NEXT:  lea.sl %s0, 3(%s0)
-; CHECK-NEXT:  or %s11, 0, %s9
-  ret i64 15032385535
-}
-
-define i64 @p15032385536i64() {
-; CHECK-LABEL: p15032385536i64:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:  lea %s0, -2147483648
-; CHECK-NEXT:  and %s0, %s0, (32)0
-; CHECK-NEXT:  lea.sl %s0, 3(%s0)
-; CHECK-NEXT:  or %s11, 0, %s9
-  ret i64 15032385536
-}
-
-define signext i64 @p15032385536si64() {
-; CHECK-LABEL: p15032385536si64:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:  lea %s0, -2147483648
-; CHECK-NEXT:  and %s0, %s0, (32)0
-; CHECK-NEXT:  lea.sl %s0, 3(%s0)
-; CHECK-NEXT:  or %s11, 0, %s9
-  ret i64 15032385536
-}
-
-define zeroext i64 @p15032385536zi64() {
-; CHECK-LABEL: p15032385536zi64:
-; CHECK:       .LBB{{[0-9]+}}_2:
-; CHECK-NEXT:  lea %s0, -2147483648
-; CHECK-NEXT:  and %s0, %s0, (32)0
-; CHECK-NEXT:  lea.sl %s0, 3(%s0)
-; CHECK-NEXT:  or %s11, 0, %s9
-  ret i64 15032385536
-}


        


More information about the llvm-commits mailing list