[llvm] r239799 - [AArch64] Generalize extract-high DUP extension to MOVI/MVNI.

Ahmed Bougacha ahmed.bougacha at gmail.com
Mon Jun 15 18:18:14 PDT 2015


Author: ab
Date: Mon Jun 15 20:18:14 2015
New Revision: 239799

URL: http://llvm.org/viewvc/llvm-project?rev=239799&view=rev
Log:
[AArch64] Generalize extract-high DUP extension to MOVI/MVNI.

These are really immediate DUPs, and they suffer from the same problem DUP
does with long instructions that have a high/"2" variant (e.g. smull).

By extending a MOVI (or DUP, before this patch), we can avoid an ext
on the other operand of the long instruction, e.g. turning:
    ext.16b v0, v0, v0, #8
    movi.4h v1, #0x53
    smull.4s  v0, v0, v1
into:
    movi.8h v1, #0x53
    smull2.4s  v0, v0, v1
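
A minimal source-level sketch of the kind of code that benefits (not part of
the patch; it assumes the ACLE arm_neon.h intrinsics, and the function name is
made up):

    // Multiply the high half of a vector by a constant splat.  The splatted
    // 29 (#0x1d) is materialized as an immediate MOVI, which can now be
    // widened to .8h so smull2 is used directly, with no ext on 'a'.
    #include <arm_neon.h>

    int32x4_t mul_high_by_29(int16x8_t a) {
      return vmull_high_n_s16(a, 29);
    }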

While there, add a now-necessary combine to fold (VT NVCAST (VT x)).
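
A note on why that fold becomes necessary (my reading of the surrounding code,
not spelled out above): the AdvSIMD modified-immediate lowering wraps the
MOVI/MVNI node in an AArch64ISD::NVCAST even when the cast doesn't change the
type, and unless such no-op casts are stripped, the new MOVI/MVNI cases in
tryExtendDUPToExtractHigh never see the immediate node directly.  The fold
itself is just:

    (VT NVCAST (VT x)) --> x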

Modified:
    llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/trunk/test/CodeGen/AArch64/arm64-neon-2velem-high.ll

Modified: llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp?rev=239799&r1=239798&r2=239799&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64ISelLowering.cpp Mon Jun 15 20:18:14 2015
@@ -7580,21 +7580,26 @@ static SDValue tryCombineFixedPointConve
 //
 // This routine does the actual conversion of such DUPs, once outer routines
 // have determined that everything else is in order.
+// It also supports immediate DUP-like nodes (MOVI/MVNi), which we can fold
+// similarly here.
 static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG) {
-  // We can handle most types of duplicate, but the lane ones have an extra
-  // operand saying *which* lane, so we need to know.
-  bool IsDUPLANE;
   switch (N.getOpcode()) {
   case AArch64ISD::DUP:
-    IsDUPLANE = false;
-    break;
   case AArch64ISD::DUPLANE8:
   case AArch64ISD::DUPLANE16:
   case AArch64ISD::DUPLANE32:
   case AArch64ISD::DUPLANE64:
-    IsDUPLANE = true;
+  case AArch64ISD::MOVI:
+  case AArch64ISD::MOVIshift:
+  case AArch64ISD::MOVIedit:
+  case AArch64ISD::MOVImsl:
+  case AArch64ISD::MVNIshift:
+  case AArch64ISD::MVNImsl:
     break;
   default:
+    // FMOV could be supported, but isn't very useful, as it would only occur
+    // if you passed a bitcast'd floating point immediate to an eligible long
+    // integer op (addl, smull, ...).
     return SDValue();
   }
 
@@ -7604,17 +7609,11 @@ static SDValue tryExtendDUPToExtractHigh
 
   MVT ElementTy = NarrowTy.getVectorElementType();
   unsigned NumElems = NarrowTy.getVectorNumElements();
-  MVT NewDUPVT = MVT::getVectorVT(ElementTy, NumElems * 2);
+  MVT NewVT = MVT::getVectorVT(ElementTy, NumElems * 2);
 
   SDLoc dl(N);
-  SDValue NewDUP;
-  if (IsDUPLANE)
-    NewDUP = DAG.getNode(N.getOpcode(), dl, NewDUPVT, N.getOperand(0),
-                         N.getOperand(1));
-  else
-    NewDUP = DAG.getNode(AArch64ISD::DUP, dl, NewDUPVT, N.getOperand(0));
-
-  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NarrowTy, NewDUP,
+  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NarrowTy,
+                     DAG.getNode(N->getOpcode(), dl, NewVT, N->ops()),
                      DAG.getConstant(NumElems, dl, MVT::i64));
 }
 
@@ -8913,6 +8912,14 @@ static SDValue performSelectCCCombine(SD
   return DAG.getNode(Opcode, SDLoc(N), N->getValueType(0), LHS, RHS);
 }
 
+/// Get rid of unnecessary NVCASTs (that don't change the type).
+static SDValue performNVCASTCombine(SDNode *N) {
+  if (N->getValueType(0) == N->getOperand(0).getValueType())
+    return N->getOperand(0);
+
+  return SDValue();
+}
+
 SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -8955,6 +8962,8 @@ SDValue AArch64TargetLowering::PerformDA
     return performCONDCombine(N, DCI, DAG, 2, 3);
   case AArch64ISD::DUP:
     return performPostLD1Combine(N, DCI, false);
+  case AArch64ISD::NVCAST:
+    return performNVCASTCombine(N);
   case ISD::INSERT_VECTOR_ELT:
     return performPostLD1Combine(N, DCI, true);
   case ISD::INTRINSIC_VOID:

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-neon-2velem-high.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-neon-2velem-high.ll?rev=239799&r1=239798&r2=239799&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-neon-2velem-high.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-neon-2velem-high.ll Mon Jun 15 20:18:14 2015
@@ -16,6 +16,17 @@ entry:
   ret <4 x i32> %vmull15.i.i
 }
 
+define <4 x i32> @test_vmull_high_n_s16_imm(<8 x i16> %a) #0 {
+; CHECK-LABEL: test_vmull_high_n_s16_imm:
+; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #0x1d
+; CHECK-NEXT: smull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vmull15.i.i = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> <i16 29, i16 29, i16 29, i16 29>)
+  ret <4 x i32> %vmull15.i.i
+}
+
 define <2 x i64> @test_vmull_high_n_s32(<4 x i32> %a, i32 %b) #0 {
 ; CHECK-LABEL: test_vmull_high_n_s32:
 ; CHECK-NEXT: dup [[REPLICATE:v[0-9]+]].4s, w0
@@ -29,6 +40,17 @@ entry:
   ret <2 x i64> %vmull9.i.i
 }
 
+define <2 x i64> @test_vmull_high_n_s32_imm(<4 x i32> %a) #0 {
+; CHECK-LABEL: test_vmull_high_n_s32_imm:
+; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #0x1, msl #8
+; CHECK-NEXT: smull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %vmull9.i.i = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> <i32 511, i32 511>)
+  ret <2 x i64> %vmull9.i.i
+}
+
 define <4 x i32> @test_vmull_high_n_u16(<8 x i16> %a, i16 %b) #0 {
 ; CHECK-LABEL: test_vmull_high_n_u16:
 ; CHECK-NEXT: dup [[REPLICATE:v[0-9]+]].8h, w0
@@ -44,6 +66,17 @@ entry:
   ret <4 x i32> %vmull15.i.i
 }
 
+define <4 x i32> @test_vmull_high_n_u16_imm(<8 x i16> %a) #0 {
+; CHECK-LABEL: test_vmull_high_n_u16_imm:
+; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #0x11, lsl #8
+; CHECK-NEXT: umull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vmull15.i.i = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> <i16 4352, i16 4352, i16 4352, i16 4352>)
+  ret <4 x i32> %vmull15.i.i
+}
+
 define <2 x i64> @test_vmull_high_n_u32(<4 x i32> %a, i32 %b) #0 {
 ; CHECK-LABEL: test_vmull_high_n_u32:
 ; CHECK-NEXT: dup [[REPLICATE:v[0-9]+]].4s, w0
@@ -57,6 +90,17 @@ entry:
   ret <2 x i64> %vmull9.i.i
 }
 
+define <2 x i64> @test_vmull_high_n_u32_imm(<4 x i32> %a) #0 {
+; CHECK-LABEL: test_vmull_high_n_u32_imm:
+; CHECK-NEXT: mvni [[REPLICATE:v[0-9]+]].4s, #0x1, msl #8
+; CHECK-NEXT: umull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %vmull9.i.i = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> <i32 4294966784, i32 4294966784>)
+  ret <2 x i64> %vmull9.i.i
+}
+
 define <4 x i32> @test_vqdmull_high_n_s16(<8 x i16> %a, i16 %b) #0 {
 ; CHECK-LABEL: test_vqdmull_high_n_s16:
 ; CHECK-NEXT: dup [[REPLICATE:v[0-9]+]].8h, w0
@@ -72,6 +116,17 @@ entry:
   ret <4 x i32> %vqdmull15.i.i
 }
 
+define <4 x i32> @test_vqdmull_high_n_s16_imm(<8 x i16> %a) #0 {
+; CHECK-LABEL: test_vqdmull_high_n_s16_imm:
+; CHECK-NEXT: mvni [[REPLICATE:v[0-9]+]].8h, #0x11, lsl #8
+; CHECK-NEXT: sqdmull2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i.i = shufflevector <8 x i16> %a, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vqdmull15.i.i = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> <i16 61183, i16 61183, i16 61183, i16 61183>)
+  ret <4 x i32> %vqdmull15.i.i
+}
+
 define <2 x i64> @test_vqdmull_high_n_s32(<4 x i32> %a, i32 %b) #0 {
 ; CHECK-LABEL: test_vqdmull_high_n_s32:
 ; CHECK-NEXT: dup [[REPLICATE:v[0-9]+]].4s, w0
@@ -85,6 +140,17 @@ entry:
   ret <2 x i64> %vqdmull9.i.i
 }
 
+define <2 x i64> @test_vqdmull_high_n_s32_imm(<4 x i32> %a) #0 {
+; CHECK-LABEL: test_vqdmull_high_n_s32_imm:
+; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #0x1d
+; CHECK-NEXT: sqdmull2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i.i = shufflevector <4 x i32> %a, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %vqdmull9.i.i = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> <i32 29, i32 29>)
+  ret <2 x i64> %vqdmull9.i.i
+}
+
 define <4 x i32> @test_vmlal_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) #0 {
 ; CHECK-LABEL: test_vmlal_high_n_s16:
 ; CHECK-NEXT: dup [[REPLICATE:v[0-9]+]].8h, w0
@@ -101,6 +167,18 @@ entry:
   ret <4 x i32> %add.i.i
 }
 
+define <4 x i32> @test_vmlal_high_n_s16_imm(<4 x i32> %a, <8 x i16> %b) #0 {
+; CHECK-LABEL: test_vmlal_high_n_s16_imm:
+; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #0x1d
+; CHECK-NEXT: smlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vmull2.i.i.i = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> <i16 29, i16 29, i16 29, i16 29>)
+  %add.i.i = add <4 x i32> %vmull2.i.i.i, %a
+  ret <4 x i32> %add.i.i
+}
+
 define <2 x i64> @test_vmlal_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) #0 {
 ; CHECK-LABEL: test_vmlal_high_n_s32:
 ; CHECK-NEXT: dup [[REPLICATE:v[0-9]+]].4s, w0
@@ -115,6 +193,18 @@ entry:
   ret <2 x i64> %add.i.i
 }
 
+define <2 x i64> @test_vmlal_high_n_s32_imm(<2 x i64> %a, <4 x i32> %b) #0 {
+; CHECK-LABEL: test_vmlal_high_n_s32_imm:
+; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #0x1d
+; CHECK-NEXT: smlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %vmull2.i.i.i = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> <i32 29, i32 29>)
+  %add.i.i = add <2 x i64> %vmull2.i.i.i, %a
+  ret <2 x i64> %add.i.i
+}
+
 define <4 x i32> @test_vmlal_high_n_u16(<4 x i32> %a, <8 x i16> %b, i16 %c) #0 {
 ; CHECK-LABEL: test_vmlal_high_n_u16:
 ; CHECK-NEXT: dup [[REPLICATE:v[0-9]+]].8h, w0
@@ -131,6 +221,18 @@ entry:
   ret <4 x i32> %add.i.i
 }
 
+define <4 x i32> @test_vmlal_high_n_u16_imm(<4 x i32> %a, <8 x i16> %b) #0 {
+; CHECK-LABEL: test_vmlal_high_n_u16_imm:
+; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #0x1d
+; CHECK-NEXT: umlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vmull2.i.i.i = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> <i16 29, i16 29, i16 29, i16 29>)
+  %add.i.i = add <4 x i32> %vmull2.i.i.i, %a
+  ret <4 x i32> %add.i.i
+}
+
 define <2 x i64> @test_vmlal_high_n_u32(<2 x i64> %a, <4 x i32> %b, i32 %c) #0 {
 ; CHECK-LABEL: test_vmlal_high_n_u32:
 ; CHECK-NEXT: dup [[REPLICATE:v[0-9]+]].4s, w0
@@ -145,6 +247,18 @@ entry:
   ret <2 x i64> %add.i.i
 }
 
+define <2 x i64> @test_vmlal_high_n_u32_imm(<2 x i64> %a, <4 x i32> %b) #0 {
+; CHECK-LABEL: test_vmlal_high_n_u32_imm:
+; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #0x1d
+; CHECK-NEXT: umlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %vmull2.i.i.i = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> <i32 29, i32 29>)
+  %add.i.i = add <2 x i64> %vmull2.i.i.i, %a
+  ret <2 x i64> %add.i.i
+}
+
 define <4 x i32> @test_vqdmlal_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) #0 {
 ; CHECK-LABEL: test_vqdmlal_high_n_s16:
 ; CHECK-NEXT: dup [[REPLICATE:v[0-9]+]].8h, w0
@@ -161,6 +275,18 @@ entry:
   ret <4 x i32> %vqdmlal17.i.i
 }
 
+define <4 x i32> @test_vqdmlal_high_n_s16_imm(<4 x i32> %a, <8 x i16> %b) #0 {
+; CHECK-LABEL: test_vqdmlal_high_n_s16_imm:
+; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #0x1d
+; CHECK-NEXT: sqdmlal2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vqdmlal15.i.i = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> <i16 29, i16 29, i16 29, i16 29>)
+  %vqdmlal17.i.i = call <4 x i32> @llvm.aarch64.neon.sqadd.v4i32(<4 x i32> %a, <4 x i32> %vqdmlal15.i.i)
+  ret <4 x i32> %vqdmlal17.i.i
+}
+
 define <2 x i64> @test_vqdmlal_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) #0 {
 ; CHECK-LABEL: test_vqdmlal_high_n_s32:
 ; CHECK-NEXT: dup [[REPLICATE:v[0-9]+]].4s, w0
@@ -175,6 +301,18 @@ entry:
   ret <2 x i64> %vqdmlal11.i.i
 }
 
+define <2 x i64> @test_vqdmlal_high_n_s32_imm(<2 x i64> %a, <4 x i32> %b) #0 {
+; CHECK-LABEL: test_vqdmlal_high_n_s32_imm:
+; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #0x1d
+; CHECK-NEXT: sqdmlal2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %vqdmlal9.i.i = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> <i32 29, i32 29>)
+  %vqdmlal11.i.i = call <2 x i64> @llvm.aarch64.neon.sqadd.v2i64(<2 x i64> %a, <2 x i64> %vqdmlal9.i.i)
+  ret <2 x i64> %vqdmlal11.i.i
+}
+
 define <4 x i32> @test_vmlsl_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) #0 {
 ; CHECK-LABEL: test_vmlsl_high_n_s16:
 ; CHECK-NEXT: dup [[REPLICATE:v[0-9]+]].8h, w0
@@ -191,6 +329,18 @@ entry:
   ret <4 x i32> %sub.i.i
 }
 
+define <4 x i32> @test_vmlsl_high_n_s16_imm(<4 x i32> %a, <8 x i16> %b) #0 {
+; CHECK-LABEL: test_vmlsl_high_n_s16_imm:
+; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #0x1d
+; CHECK-NEXT: smlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vmull2.i.i.i = call <4 x i32> @llvm.aarch64.neon.smull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> <i16 29, i16 29, i16 29, i16 29>)
+  %sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i
+  ret <4 x i32> %sub.i.i
+}
+
 define <2 x i64> @test_vmlsl_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) #0 {
 ; CHECK-LABEL: test_vmlsl_high_n_s32:
 ; CHECK-NEXT: dup [[REPLICATE:v[0-9]+]].4s, w0
@@ -205,6 +355,18 @@ entry:
   ret <2 x i64> %sub.i.i
 }
 
+define <2 x i64> @test_vmlsl_high_n_s32_imm(<2 x i64> %a, <4 x i32> %b) #0 {
+; CHECK-LABEL: test_vmlsl_high_n_s32_imm:
+; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #0x1d
+; CHECK-NEXT: smlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %vmull2.i.i.i = call <2 x i64> @llvm.aarch64.neon.smull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> <i32 29, i32 29>)
+  %sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i
+  ret <2 x i64> %sub.i.i
+}
+
 define <4 x i32> @test_vmlsl_high_n_u16(<4 x i32> %a, <8 x i16> %b, i16 %c) #0 {
 ; CHECK-LABEL: test_vmlsl_high_n_u16:
 ; CHECK-NEXT: dup [[REPLICATE:v[0-9]+]].8h, w0
@@ -221,6 +383,18 @@ entry:
   ret <4 x i32> %sub.i.i
 }
 
+define <4 x i32> @test_vmlsl_high_n_u16_imm(<4 x i32> %a, <8 x i16> %b) #0 {
+; CHECK-LABEL: test_vmlsl_high_n_u16_imm:
+; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #0x1d
+; CHECK-NEXT: umlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vmull2.i.i.i = call <4 x i32> @llvm.aarch64.neon.umull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> <i16 29, i16 29, i16 29, i16 29>)
+  %sub.i.i = sub <4 x i32> %a, %vmull2.i.i.i
+  ret <4 x i32> %sub.i.i
+}
+
 define <2 x i64> @test_vmlsl_high_n_u32(<2 x i64> %a, <4 x i32> %b, i32 %c) #0 {
 ; CHECK-LABEL: test_vmlsl_high_n_u32:
 ; CHECK-NEXT: dup [[REPLICATE:v[0-9]+]].4s, w0
@@ -235,6 +409,18 @@ entry:
   ret <2 x i64> %sub.i.i
 }
 
+define <2 x i64> @test_vmlsl_high_n_u32_imm(<2 x i64> %a, <4 x i32> %b) #0 {
+; CHECK-LABEL: test_vmlsl_high_n_u32_imm:
+; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #0x1d
+; CHECK-NEXT: umlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %vmull2.i.i.i = call <2 x i64> @llvm.aarch64.neon.umull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> <i32 29, i32 29>)
+  %sub.i.i = sub <2 x i64> %a, %vmull2.i.i.i
+  ret <2 x i64> %sub.i.i
+}
+
 define <4 x i32> @test_vqdmlsl_high_n_s16(<4 x i32> %a, <8 x i16> %b, i16 %c) #0 {
 ; CHECK-LABEL: test_vqdmlsl_high_n_s16:
 ; CHECK-NEXT: dup [[REPLICATE:v[0-9]+]].8h, w0
@@ -251,6 +437,18 @@ entry:
   ret <4 x i32> %vqdmlsl17.i.i
 }
 
+define <4 x i32> @test_vqdmlsl_high_n_s16_imm(<4 x i32> %a, <8 x i16> %b) #0 {
+; CHECK-LABEL: test_vqdmlsl_high_n_s16_imm:
+; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].8h, #0x1d
+; CHECK-NEXT: sqdmlsl2 {{v[0-9]+}}.4s, {{v[0-9]+}}.8h, [[REPLICATE]].8h
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i.i = shufflevector <8 x i16> %b, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
+  %vqdmlsl15.i.i = call <4 x i32> @llvm.aarch64.neon.sqdmull.v4i32(<4 x i16> %shuffle.i.i, <4 x i16> <i16 29, i16 29, i16 29, i16 29>)
+  %vqdmlsl17.i.i = call <4 x i32> @llvm.aarch64.neon.sqsub.v4i32(<4 x i32> %a, <4 x i32> %vqdmlsl15.i.i)
+  ret <4 x i32> %vqdmlsl17.i.i
+}
+
 define <2 x i64> @test_vqdmlsl_high_n_s32(<2 x i64> %a, <4 x i32> %b, i32 %c) #0 {
 ; CHECK-LABEL: test_vqdmlsl_high_n_s32:
 ; CHECK-NEXT: dup [[REPLICATE:v[0-9]+]].4s, w0
@@ -264,6 +462,18 @@ entry:
   %vqdmlsl11.i.i = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl9.i.i)
   ret <2 x i64> %vqdmlsl11.i.i
 }
+
+define <2 x i64> @test_vqdmlsl_high_n_s32_imm(<2 x i64> %a, <4 x i32> %b) #0 {
+; CHECK-LABEL: test_vqdmlsl_high_n_s32_imm:
+; CHECK-NEXT: movi [[REPLICATE:v[0-9]+]].4s, #0x1d
+; CHECK-NEXT: sqdmlsl2 {{v[0-9]+}}.2d, {{v[0-9]+}}.4s, [[REPLICATE]].4s
+; CHECK-NEXT: ret
+entry:
+  %shuffle.i.i = shufflevector <4 x i32> %b, <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+  %vqdmlsl9.i.i = call <2 x i64> @llvm.aarch64.neon.sqdmull.v2i64(<2 x i32> %shuffle.i.i, <2 x i32> <i32 29, i32 29>)
+  %vqdmlsl11.i.i = call <2 x i64> @llvm.aarch64.neon.sqsub.v2i64(<2 x i64> %a, <2 x i64> %vqdmlsl9.i.i)
+  ret <2 x i64> %vqdmlsl11.i.i
+}
 
 define <2 x float> @test_vmul_n_f32(<2 x float> %a, float %b) #0 {
 ; CHECK-LABEL: test_vmul_n_f32:
