[llvm] r365178 - [ARM] MVE VMOV immediate handling

David Green via llvm-commits <llvm-commits at lists.llvm.org>
Fri Jul 5 03:02:44 PDT 2019


Author: dmgreen
Date: Fri Jul  5 03:02:43 2019
New Revision: 365178

URL: http://llvm.org/viewvc/llvm-project?rev=365178&view=rev
Log:
[ARM] MVE VMOV immediate handling

This adds some handling for VMOV immediates under MVE, using the same method that
NEON uses. We create VMOVIMM/VMVNIMM/VMOVFPIMM nodes based on the immediate, and
select them using the now-renamed ARMvmovImm/ARMvmvnImm/ARMvmovFPImm nodes. There
is also an extra 64-bit immediate mode that I have not yet added here.

Code by David Sherwood

Differential Revision: https://reviews.llvm.org/D63884
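
To illustrate the effect (condensed from the mve-loadstore.ll and mve-vmovimm.ll
tests below), a constant splat that previously needed a scalar mov plus a vdup,
e.g. "movs r0, #1; vdup.32 q0, r0", can now be selected as a single
modified-immediate vmov or vmvn under -mattr=+mve:

    define arm_aapcs_vfpcc <4 x i32> @mov_int32_1(i32 *%dest) {
    entry:
      ret <4 x i32> <i32 1, i32 1, i32 1, i32 1>
    }

now produces:

    vmov.i32 q0, #0x1
    bx lr

and a splat of 0xffff0000 selects the inverted form, "vmvn.i32 q0, #0xffff"
(see mov_int32_4294901760 in mve-vmovimm.ll).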

Added:
    llvm/trunk/test/CodeGen/Thumb2/mve-vmovimm.ll
    llvm/trunk/test/CodeGen/Thumb2/mve-vmvnimm.ll
Modified:
    llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
    llvm/trunk/lib/Target/ARM/ARMISelLowering.h
    llvm/trunk/lib/Target/ARM/ARMInstrInfo.td
    llvm/trunk/lib/Target/ARM/ARMInstrMVE.td
    llvm/trunk/lib/Target/ARM/ARMInstrNEON.td
    llvm/trunk/test/CodeGen/Thumb2/mve-loadstore.ll

Modified: llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp?rev=365178&r1=365177&r2=365178&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp Fri Jul  5 03:02:43 2019
@@ -823,9 +823,6 @@ ARMTargetLowering::ARMTargetLowering(con
     setTargetDAGCombine(ISD::SIGN_EXTEND);
     setTargetDAGCombine(ISD::ZERO_EXTEND);
     setTargetDAGCombine(ISD::ANY_EXTEND);
-    setTargetDAGCombine(ISD::BUILD_VECTOR);
-    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
-    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
     setTargetDAGCombine(ISD::STORE);
     setTargetDAGCombine(ISD::FP_TO_SINT);
     setTargetDAGCombine(ISD::FP_TO_UINT);
@@ -843,6 +840,12 @@ ARMTargetLowering::ARMTargetLowering(con
     }
   }
 
+  if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
+    setTargetDAGCombine(ISD::BUILD_VECTOR);
+    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
+    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
+  }
+
   if (!Subtarget->hasFP64()) {
     // When targeting a floating-point unit with only single-precision
     // operations, f64 is legal for the few double-precision instructions which
@@ -5942,7 +5945,7 @@ static SDValue LowerSETCCCARRY(SDValue O
 }
 
 /// isNEONModifiedImm - Check if the specified splat value corresponds to a
-/// valid vector constant for a NEON instruction with a "modified immediate"
+/// valid vector constant for a NEON or MVE instruction with a "modified immediate"
 /// operand (e.g., VMOV).  If so, return the encoded value.
 static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
                                  unsigned SplatBitSize, SelectionDAG &DAG,
@@ -6028,6 +6031,10 @@ static SDValue isNEONModifiedImm(uint64_
       break;
     }
 
+    // cmode == 0b1101 is not supported for MVE VMVN
+    if (type == MVEVMVNModImm)
+      return SDValue();
+
     if ((SplatBits & ~0xffffff) == 0 &&
         ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
       // Value = 0x00nnffff: Op=x, Cmode=1101.
@@ -6594,13 +6601,15 @@ SDValue ARMTargetLowering::LowerBUILD_VE
     if (SplatUndef.isAllOnesValue())
       return DAG.getUNDEF(VT);
 
-    if (ST->hasNEON() && SplatBitSize <= 64) {
+    if ((ST->hasNEON() && SplatBitSize <= 64) ||
+        (ST->hasMVEIntegerOps() && SplatBitSize <= 32)) {
       // Check if an immediate VMOV works.
       EVT VmovVT;
       SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
                                       SplatUndef.getZExtValue(), SplatBitSize,
                                       DAG, dl, VmovVT, VT.is128BitVector(),
                                       VMOVModImm);
+
       if (Val.getNode()) {
         SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
         return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
@@ -6608,10 +6617,10 @@ SDValue ARMTargetLowering::LowerBUILD_VE
 
       // Try an immediate VMVN.
       uint64_t NegatedImm = (~SplatBits).getZExtValue();
-      Val = isNEONModifiedImm(NegatedImm,
-                                      SplatUndef.getZExtValue(), SplatBitSize,
-                                      DAG, dl, VmovVT, VT.is128BitVector(),
-                                      VMVNModImm);
+      Val = isNEONModifiedImm(
+          NegatedImm, SplatUndef.getZExtValue(), SplatBitSize,
+          DAG, dl, VmovVT, VT.is128BitVector(),
+          ST->hasMVEIntegerOps() ? MVEVMVNModImm : VMVNModImm);
       if (Val.getNode()) {
         SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
         return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
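
For context (condensed from the new mve-vmvnimm.ll test), the MVEVMVNModImm early
return added above skips the 0x00nnffff pattern (Cmode=0b1101), which is not
supported for MVE VMVN, so splats whose inverse has that shape fall back to a
constant-pool load:

    ; mov_int32_4278386688: splat of 0xff030000, where ~0xff030000 == 0x00fcffff
    ; (the Cmode=0b1101 shape), so no vmvn.i32 is formed and we load instead:
    ;   adr       r0, .LCPI7_0
    ;   vldrw.u32 q0, [r0]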

Modified: llvm/trunk/lib/Target/ARM/ARMISelLowering.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMISelLowering.h?rev=365178&r1=365177&r2=365178&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.h (original)
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.h Fri Jul  5 03:02:43 2019
@@ -833,6 +833,7 @@ class VectorType;
   enum NEONModImmType {
     VMOVModImm,
     VMVNModImm,
+    MVEVMVNModImm,
     OtherModImm
   };
 

Modified: llvm/trunk/lib/Target/ARM/ARMInstrInfo.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMInstrInfo.td?rev=365178&r1=365177&r2=365178&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrInfo.td (original)
+++ llvm/trunk/lib/Target/ARM/ARMInstrInfo.td Fri Jul  5 03:02:43 2019
@@ -249,6 +249,11 @@ def SDTARMVGETLN  : SDTypeProfile<1, 2,
 def ARMvgetlaneu : SDNode<"ARMISD::VGETLANEu", SDTARMVGETLN>;
 def ARMvgetlanes : SDNode<"ARMISD::VGETLANEs", SDTARMVGETLN>;
 
+def SDTARMVMOVIMM : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
+def ARMvmovImm   : SDNode<"ARMISD::VMOVIMM", SDTARMVMOVIMM>;
+def ARMvmvnImm   : SDNode<"ARMISD::VMVNIMM", SDTARMVMOVIMM>;
+def ARMvmovFPImm : SDNode<"ARMISD::VMOVFPIMM", SDTARMVMOVIMM>;
+
 def ARMWLS : SDNode<"ARMISD::WLS", SDT_ARMWhileLoop,
                     [SDNPHasChain]>;
 

Modified: llvm/trunk/lib/Target/ARM/ARMInstrMVE.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMInstrMVE.td?rev=365178&r1=365177&r2=365178&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrMVE.td (original)
+++ llvm/trunk/lib/Target/ARM/ARMInstrMVE.td Fri Jul  5 03:02:43 2019
@@ -2192,6 +2192,23 @@ def MVE_VMVNimmi32 : MVE_mod_imm<"vmvn",
 }
 } // let isReMaterializable = 1
 
+let Predicates = [HasMVEInt] in {
+  def : Pat<(v16i8 (ARMvmovImm timm:$simm)),
+            (v16i8 (MVE_VMOVimmi8  nImmSplatI8:$simm))>;
+  def : Pat<(v8i16 (ARMvmovImm timm:$simm)),
+            (v8i16 (MVE_VMOVimmi16 nImmSplatI16:$simm))>;
+  def : Pat<(v4i32 (ARMvmovImm timm:$simm)),
+            (v4i32 (MVE_VMOVimmi32 nImmVMOVI32:$simm))>;
+
+  def : Pat<(v8i16 (ARMvmvnImm timm:$simm)),
+            (v8i16 (MVE_VMVNimmi16 nImmSplatI16:$simm))>;
+  def : Pat<(v4i32 (ARMvmvnImm timm:$simm)),
+            (v4i32 (MVE_VMVNimmi32 nImmVMOVI32:$simm))>;
+
+  def : Pat<(v4f32 (ARMvmovFPImm timm:$simm)),
+            (v4f32 (MVE_VMOVimmf32 nImmVMOVF32:$simm))>;
+}
+
 class MVE_VMINMAXA<string iname, string suffix, bits<2> size,
                    bit bit_12, list<dag> pattern=[]>
   : MVE_p<(outs MQPR:$Qd), (ins MQPR:$Qd_src, MQPR:$Qm),

Modified: llvm/trunk/lib/Target/ARM/ARMInstrNEON.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMInstrNEON.td?rev=365178&r1=365177&r2=365178&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMInstrNEON.td (original)
+++ llvm/trunk/lib/Target/ARM/ARMInstrNEON.td Fri Jul  5 03:02:43 2019
@@ -526,11 +526,6 @@ def NEONvqrshrnsu : SDNode<"ARMISD::VQRS
 def NEONvsli      : SDNode<"ARMISD::VSLI", SDTARMVSHINS>;
 def NEONvsri      : SDNode<"ARMISD::VSRI", SDTARMVSHINS>;
 
-def SDTARMVMOVIMM : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisVT<1, i32>]>;
-def NEONvmovImm   : SDNode<"ARMISD::VMOVIMM", SDTARMVMOVIMM>;
-def NEONvmvnImm   : SDNode<"ARMISD::VMVNIMM", SDTARMVMOVIMM>;
-def NEONvmovFPImm : SDNode<"ARMISD::VMOVFPIMM", SDTARMVMOVIMM>;
-
 def SDTARMVORRIMM : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0, 1>,
                                            SDTCisVT<2, i32>]>;
 def NEONvorrImm   : SDNode<"ARMISD::VORRIMM", SDTARMVORRIMM>;
@@ -566,14 +561,14 @@ def NEONvtbl1     : SDNode<"ARMISD::VTBL
 def NEONvtbl2     : SDNode<"ARMISD::VTBL2", SDTARMVTBL2>;
 
 
-def NEONimmAllZerosV: PatLeaf<(NEONvmovImm (i32 timm)), [{
+def NEONimmAllZerosV: PatLeaf<(ARMvmovImm (i32 timm)), [{
   ConstantSDNode *ConstVal = cast<ConstantSDNode>(N->getOperand(0));
   unsigned EltBits = 0;
   uint64_t EltVal = ARM_AM::decodeNEONModImm(ConstVal->getZExtValue(), EltBits);
   return (EltBits == 32 && EltVal == 0);
 }]>;
 
-def NEONimmAllOnesV: PatLeaf<(NEONvmovImm (i32 timm)), [{
+def NEONimmAllOnesV: PatLeaf<(ARMvmovImm (i32 timm)), [{
   ConstantSDNode *ConstVal = cast<ConstantSDNode>(N->getOperand(0));
   unsigned EltBits = 0;
   uint64_t EltVal = ARM_AM::decodeNEONModImm(ConstVal->getZExtValue(), EltBits);
@@ -5345,28 +5340,28 @@ let isReMaterializable = 1 in {
 def VMVNv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 1, 1, (outs DPR:$Vd),
                          (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
                          "vmvn", "i16", "$Vd, $SIMM", "",
-                         [(set DPR:$Vd, (v4i16 (NEONvmvnImm timm:$SIMM)))]> {
+                         [(set DPR:$Vd, (v4i16 (ARMvmvnImm timm:$SIMM)))]> {
   let Inst{9} = SIMM{9};
 }
 
 def VMVNv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 1, 1, (outs QPR:$Vd),
                          (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
                          "vmvn", "i16", "$Vd, $SIMM", "",
-                         [(set QPR:$Vd, (v8i16 (NEONvmvnImm timm:$SIMM)))]> {
+                         [(set QPR:$Vd, (v8i16 (ARMvmvnImm timm:$SIMM)))]> {
   let Inst{9} = SIMM{9};
 }
 
 def VMVNv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 1, 1, (outs DPR:$Vd),
                          (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
                          "vmvn", "i32", "$Vd, $SIMM", "",
-                         [(set DPR:$Vd, (v2i32 (NEONvmvnImm timm:$SIMM)))]> {
+                         [(set DPR:$Vd, (v2i32 (ARMvmvnImm timm:$SIMM)))]> {
   let Inst{11-8} = SIMM{11-8};
 }
 
 def VMVNv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 1, 1, (outs QPR:$Vd),
                          (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
                          "vmvn", "i32", "$Vd, $SIMM", "",
-                         [(set QPR:$Vd, (v4i32 (NEONvmvnImm timm:$SIMM)))]> {
+                         [(set QPR:$Vd, (v4i32 (ARMvmvnImm timm:$SIMM)))]> {
   let Inst{11-8} = SIMM{11-8};
 }
 }
@@ -6053,57 +6048,57 @@ let isReMaterializable = 1, isAsCheapAsA
 def VMOVv8i8  : N1ModImm<1, 0b000, 0b1110, 0, 0, 0, 1, (outs DPR:$Vd),
                          (ins nImmSplatI8:$SIMM), IIC_VMOVImm,
                          "vmov", "i8", "$Vd, $SIMM", "",
-                         [(set DPR:$Vd, (v8i8 (NEONvmovImm timm:$SIMM)))]>;
+                         [(set DPR:$Vd, (v8i8 (ARMvmovImm timm:$SIMM)))]>;
 def VMOVv16i8 : N1ModImm<1, 0b000, 0b1110, 0, 1, 0, 1, (outs QPR:$Vd),
                          (ins nImmSplatI8:$SIMM), IIC_VMOVImm,
                          "vmov", "i8", "$Vd, $SIMM", "",
-                         [(set QPR:$Vd, (v16i8 (NEONvmovImm timm:$SIMM)))]>;
+                         [(set QPR:$Vd, (v16i8 (ARMvmovImm timm:$SIMM)))]>;
 
 def VMOVv4i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 0, 0, 1, (outs DPR:$Vd),
                          (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
                          "vmov", "i16", "$Vd, $SIMM", "",
-                         [(set DPR:$Vd, (v4i16 (NEONvmovImm timm:$SIMM)))]> {
+                         [(set DPR:$Vd, (v4i16 (ARMvmovImm timm:$SIMM)))]> {
   let Inst{9} = SIMM{9};
 }
 
 def VMOVv8i16 : N1ModImm<1, 0b000, {1,0,?,0}, 0, 1, 0, 1, (outs QPR:$Vd),
                          (ins nImmSplatI16:$SIMM), IIC_VMOVImm,
                          "vmov", "i16", "$Vd, $SIMM", "",
-                         [(set QPR:$Vd, (v8i16 (NEONvmovImm timm:$SIMM)))]> {
+                         [(set QPR:$Vd, (v8i16 (ARMvmovImm timm:$SIMM)))]> {
  let Inst{9} = SIMM{9};
 }
 
 def VMOVv2i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 0, 0, 1, (outs DPR:$Vd),
                          (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
                          "vmov", "i32", "$Vd, $SIMM", "",
-                         [(set DPR:$Vd, (v2i32 (NEONvmovImm timm:$SIMM)))]> {
+                         [(set DPR:$Vd, (v2i32 (ARMvmovImm timm:$SIMM)))]> {
   let Inst{11-8} = SIMM{11-8};
 }
 
 def VMOVv4i32 : N1ModImm<1, 0b000, {?,?,?,?}, 0, 1, 0, 1, (outs QPR:$Vd),
                          (ins nImmVMOVI32:$SIMM), IIC_VMOVImm,
                          "vmov", "i32", "$Vd, $SIMM", "",
-                         [(set QPR:$Vd, (v4i32 (NEONvmovImm timm:$SIMM)))]> {
+                         [(set QPR:$Vd, (v4i32 (ARMvmovImm timm:$SIMM)))]> {
   let Inst{11-8} = SIMM{11-8};
 }
 
 def VMOVv1i64 : N1ModImm<1, 0b000, 0b1110, 0, 0, 1, 1, (outs DPR:$Vd),
                          (ins nImmSplatI64:$SIMM), IIC_VMOVImm,
                          "vmov", "i64", "$Vd, $SIMM", "",
-                         [(set DPR:$Vd, (v1i64 (NEONvmovImm timm:$SIMM)))]>;
+                         [(set DPR:$Vd, (v1i64 (ARMvmovImm timm:$SIMM)))]>;
 def VMOVv2i64 : N1ModImm<1, 0b000, 0b1110, 0, 1, 1, 1, (outs QPR:$Vd),
                          (ins nImmSplatI64:$SIMM), IIC_VMOVImm,
                          "vmov", "i64", "$Vd, $SIMM", "",
-                         [(set QPR:$Vd, (v2i64 (NEONvmovImm timm:$SIMM)))]>;
+                         [(set QPR:$Vd, (v2i64 (ARMvmovImm timm:$SIMM)))]>;
 
 def VMOVv2f32 : N1ModImm<1, 0b000, 0b1111, 0, 0, 0, 1, (outs DPR:$Vd),
                          (ins nImmVMOVF32:$SIMM), IIC_VMOVImm,
                          "vmov", "f32", "$Vd, $SIMM", "",
-                         [(set DPR:$Vd, (v2f32 (NEONvmovFPImm timm:$SIMM)))]>;
+                         [(set DPR:$Vd, (v2f32 (ARMvmovFPImm timm:$SIMM)))]>;
 def VMOVv4f32 : N1ModImm<1, 0b000, 0b1111, 0, 1, 0, 1, (outs QPR:$Vd),
                          (ins nImmVMOVF32:$SIMM), IIC_VMOVImm,
                          "vmov", "f32", "$Vd, $SIMM", "",
-                         [(set QPR:$Vd, (v4f32 (NEONvmovFPImm timm:$SIMM)))]>;
+                         [(set QPR:$Vd, (v4f32 (ARMvmovFPImm timm:$SIMM)))]>;
 } // isReMaterializable, isAsCheapAsAMove
 
 // Add support for bytes replication feature, so it could be GAS compatible.

Modified: llvm/trunk/test/CodeGen/Thumb2/mve-loadstore.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/mve-loadstore.ll?rev=365178&r1=365177&r2=365178&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-loadstore.ll (original)
+++ llvm/trunk/test/CodeGen/Thumb2/mve-loadstore.ll Fri Jul  5 03:02:43 2019
@@ -92,8 +92,7 @@ define arm_aapcs_vfpcc <4 x i32> @loadst
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #40
 ; CHECK-NEXT:    sub sp, #40
-; CHECK-NEXT:    movs r0, #1
-; CHECK-NEXT:    vdup.32 q0, r0
+; CHECK-NEXT:    vmov.i32 q0, #0x1
 ; CHECK-NEXT:    mov r0, sp
 ; CHECK-NEXT:    vstrw.32 q0, [r0]
 ; CHECK-NEXT:    movs r0, #3
@@ -121,8 +120,7 @@ define arm_aapcs_vfpcc <8 x i16> @loadst
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #40
 ; CHECK-NEXT:    sub sp, #40
-; CHECK-NEXT:    movs r0, #1
-; CHECK-NEXT:    vdup.16 q0, r0
+; CHECK-NEXT:    vmov.i16 q0, #0x1
 ; CHECK-NEXT:    mov r0, sp
 ; CHECK-NEXT:    vstrh.16 q0, [r0]
 ; CHECK-NEXT:    movs r0, #3
@@ -150,8 +148,7 @@ define arm_aapcs_vfpcc <16 x i8> @loadst
 ; CHECK:       @ %bb.0: @ %entry
 ; CHECK-NEXT:    .pad #40
 ; CHECK-NEXT:    sub sp, #40
-; CHECK-NEXT:    movs r0, #1
-; CHECK-NEXT:    vdup.8 q0, r0
+; CHECK-NEXT:    vmov.i8 q0, #0x1
 ; CHECK-NEXT:    mov r0, sp
 ; CHECK-NEXT:    vstrb.8 q0, [r0]
 ; CHECK-NEXT:    movs r0, #3

Added: llvm/trunk/test/CodeGen/Thumb2/mve-vmovimm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/mve-vmovimm.ll?rev=365178&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-vmovimm.ll (added)
+++ llvm/trunk/test/CodeGen/Thumb2/mve-vmovimm.ll Fri Jul  5 03:02:43 2019
@@ -0,0 +1,243 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc <16 x i8> @mov_int8_1() {
+; CHECK-LABEL: mov_int8_1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i8 q0, #0x1
+; CHECK-NEXT:    bx lr
+entry:
+  ret <16 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>
+}
+
+define arm_aapcs_vfpcc <16 x i8> @mov_int8_m1(i8 *%dest) {
+; CHECK-LABEL: mov_int8_m1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i8 q0, #0xff
+; CHECK-NEXT:    bx lr
+entry:
+  ret <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mov_int16_1(i16 *%dest) {
+; CHECK-LABEL: mov_int16_1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i16 q0, #0x1
+; CHECK-NEXT:    bx lr
+entry:
+  ret <8 x i16> <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mov_int16_m1(i16 *%dest) {
+; CHECK-LABEL: mov_int16_m1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i8 q0, #0xff
+; CHECK-NEXT:    bx lr
+entry:
+  ret <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mov_int16_256(i16 *%dest) {
+; CHECK-LABEL: mov_int16_256:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i16 q0, #0x100
+; CHECK-NEXT:    bx lr
+entry:
+  ret <8 x i16> <i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256, i16 256>
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mov_int16_257() {
+; CHECK-LABEL: mov_int16_257:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i8 q0, #0x1
+; CHECK-NEXT:    bx lr
+entry:
+  ret <8 x i16> <i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257, i16 257>
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mov_int16_258(i16 *%dest) {
+; CHECK-LABEL: mov_int16_258:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    adr r0, .LCPI6_0
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI6_0:
+; CHECK-NEXT:    .long 16908546 @ double 8.204306265173532E-304
+; CHECK-NEXT:    .long 16908546
+; CHECK-NEXT:    .long 16908546 @ double 8.204306265173532E-304
+; CHECK-NEXT:    .long 16908546
+entry:
+  ret <8 x i16> <i16 258, i16 258, i16 258, i16 258, i16 258, i16 258, i16 258, i16 258>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_1(i32 *%dest) {
+; CHECK-LABEL: mov_int32_1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q0, #0x1
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_256(i32 *%dest) {
+; CHECK-LABEL: mov_int32_256:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q0, #0x100
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 256, i32 256, i32 256, i32 256>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_65536(i32 *%dest) {
+; CHECK-LABEL: mov_int32_65536:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q0, #0x10000
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 65536, i32 65536, i32 65536, i32 65536>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_16777216(i32 *%dest) {
+; CHECK-LABEL: mov_int32_16777216:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q0, #0x1000000
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 16777216, i32 16777216, i32 16777216, i32 16777216>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_16777217(i32 *%dest) {
+; CHECK-LABEL: mov_int32_16777217:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    adr r0, .LCPI11_0
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI11_0:
+; CHECK-NEXT:    .long 16777217 @ double 7.2911290000737531E-304
+; CHECK-NEXT:    .long 16777217
+; CHECK-NEXT:    .long 16777217 @ double 7.2911290000737531E-304
+; CHECK-NEXT:    .long 16777217
+entry:
+  ret <4 x i32> <i32 16777217, i32 16777217, i32 16777217, i32 16777217>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_17919(i32 *%dest) {
+; CHECK-LABEL: mov_int32_17919:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q0, #0x45ff
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 17919, i32 17919, i32 17919, i32 17919>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_4587519(i32 *%dest) {
+; CHECK-LABEL: mov_int32_4587519:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i32 q0, #0x45ffff
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 4587519, i32 4587519, i32 4587519, i32 4587519>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_m1(i32 *%dest) {
+; CHECK-LABEL: mov_int32_m1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i8 q0, #0xff
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_4294901760(i32 *%dest) {
+; CHECK-LABEL: mov_int32_4294901760:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmvn.i32 q0, #0xffff
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_4278190335(i32 *%dest) {
+; CHECK-LABEL: mov_int32_4278190335:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    adr r0, .LCPI16_0
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI16_0:
+; CHECK-NEXT:    .long 4278190335 @ double -5.4874634341155774E+303
+; CHECK-NEXT:    .long 4278190335
+; CHECK-NEXT:    .long 4278190335 @ double -5.4874634341155774E+303
+; CHECK-NEXT:    .long 4278190335
+entry:
+  ret <4 x i32> <i32 4278190335, i32 4278190335, i32 4278190335, i32 4278190335>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_4278255615(i32 *%dest) {
+; CHECK-LABEL: mov_int32_4278255615:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmvn.i32 q0, #0xff0000
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 4278255615, i32 4278255615, i32 4278255615, i32 4278255615>
+}
+
+define arm_aapcs_vfpcc <4 x float> @mov_float_1(float *%dest) {
+; CHECK-LABEL: mov_float_1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    adr r0, .LCPI18_0
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI18_0:
+; CHECK-NEXT:    .long 1065353216 @ double 0.007812501848093234
+; CHECK-NEXT:    .long 1065353216
+; CHECK-NEXT:    .long 1065353216 @ double 0.007812501848093234
+; CHECK-NEXT:    .long 1065353216
+entry:
+  ret <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
+}
+
+define arm_aapcs_vfpcc <4 x float> @mov_float_m3(float *%dest) {
+; CHECK-LABEL: mov_float_m3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    adr r0, .LCPI19_0
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI19_0:
+; CHECK-NEXT:    .long 3225419776 @ double -32.000022917985916
+; CHECK-NEXT:    .long 3225419776
+; CHECK-NEXT:    .long 3225419776 @ double -32.000022917985916
+; CHECK-NEXT:    .long 3225419776
+entry:
+  ret <4 x float> <float -3.000000e+00, float -3.000000e+00, float -3.000000e+00, float -3.000000e+00>
+}
+
+define arm_aapcs_vfpcc <8 x half> @mov_float16_1(half *%dest) {
+; CHECK-LABEL: mov_float16_1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i16 q0, #0x3c00
+; CHECK-NEXT:    bx lr
+
+entry:
+  ret <8 x half> <half 1.000000e+00, half 1.000000e+00, half 1.000000e+00, half 1.000000e+00, half 1.000000e+00, half 1.000000e+00, half 1.000000e+00, half 1.000000e+00>
+}
+
+define arm_aapcs_vfpcc <8 x half> @mov_float16_m3(half *%dest) {
+; CHECK-LABEL: mov_float16_m3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmov.i16 q0, #0xc200
+; CHECK-NEXT:    bx lr
+
+entry:
+  ret <8 x half> <half -3.000000e+00, half -3.000000e+00, half -3.000000e+00, half -3.000000e+00, half -3.000000e+00, half -3.000000e+00, half -3.000000e+00, half -3.000000e+00>
+}

Added: llvm/trunk/test/CodeGen/Thumb2/mve-vmvnimm.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Thumb2/mve-vmvnimm.ll?rev=365178&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Thumb2/mve-vmvnimm.ll (added)
+++ llvm/trunk/test/CodeGen/Thumb2/mve-vmvnimm.ll Fri Jul  5 03:02:43 2019
@@ -0,0 +1,83 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s
+; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve.fp -verify-machineinstrs %s -o - | FileCheck %s
+
+define arm_aapcs_vfpcc <8 x i16> @mov_int16_511(i16 *%dest) {
+; CHECK-LABEL: mov_int16_511:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmvn.i16 q0, #0xfe00
+; CHECK-NEXT:    bx lr
+entry:
+  ret <8 x i16> <i16 511, i16 511, i16 511, i16 511, i16 511, i16 511, i16 511, i16 511>
+}
+
+define arm_aapcs_vfpcc <8 x i16> @mov_int16_65281(i16 *%dest) {
+; CHECK-LABEL: mov_int16_65281:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmvn.i16 q0, #0xfe
+; CHECK-NEXT:    bx lr
+entry:
+  ret <8 x i16> <i16 65281, i16 65281, i16 65281, i16 65281, i16 65281, i16 65281, i16 65281, i16 65281>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_m7(i32 *%dest) {
+; CHECK-LABEL: mov_int32_m7:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmvn.i32 q0, #0x6
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 -7, i32 -7, i32 -7, i32 -7>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_m769(i32 *%dest) {
+; CHECK-LABEL: mov_int32_m769:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmvn.i32 q0, #0x300
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 -769, i32 -769, i32 -769, i32 -769>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_m262145(i32 *%dest) {
+; CHECK-LABEL: mov_int32_m262145:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmvn.i32 q0, #0x40000
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 -262145, i32 -262145, i32 -262145, i32 -262145>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_m134217729(i32 *%dest) {
+; CHECK-LABEL: mov_int32_m134217729:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmvn.i32 q0, #0x8000000
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 -134217729, i32 -134217729, i32 -134217729, i32 -134217729>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_4294902528(i32 *%dest) {
+; CHECK-LABEL: mov_int32_4294902528:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vmvn.i32 q0, #0xfcff
+; CHECK-NEXT:    bx lr
+entry:
+  ret <4 x i32> <i32 4294902528, i32 4294902528, i32 4294902528, i32 4294902528>
+}
+
+define arm_aapcs_vfpcc <4 x i32> @mov_int32_4278386688(i32 *%dest) {
+; CHECK-LABEL: mov_int32_4278386688:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    adr r0, .LCPI7_0
+; CHECK-NEXT:    vldrw.u32 q0, [r0]
+; CHECK-NEXT:    bx lr
+; CHECK-NEXT:    .p2align 4
+; CHECK-NEXT:  @ %bb.1:
+; CHECK-NEXT:  .LCPI7_0:
+; CHECK-NEXT:    .long 4278386688 @ double -6.5147775434702224E+303
+; CHECK-NEXT:    .long 4278386688
+; CHECK-NEXT:    .long 4278386688 @ double -6.5147775434702224E+303
+; CHECK-NEXT:    .long 4278386688
+entry:
+  ret <4 x i32> <i32 4278386688, i32 4278386688, i32 4278386688, i32 4278386688>
+}
