[llvm] 1b9a608 - [AArch64][SVE] Add wide compare immediate patterns

Cullen Rhodes via llvm-commits <llvm-commits at lists.llvm.org>
Tue Dec 10 02:55:03 PST 2019


Author: Cullen Rhodes
Date: 2019-12-10T10:41:22Z
New Revision: 1b9a608c8440b655c15378f3f7a13180281e2613

URL: https://github.com/llvm/llvm-project/commit/1b9a608c8440b655c15378f3f7a13180281e2613
DIFF: https://github.com/llvm/llvm-project/commit/1b9a608c8440b655c15378f3f7a13180281e2613.diff

LOG: [AArch64][SVE] Add wide compare immediate patterns

Summary:
Recognize wide compares where the wide (64-bit element) operand is a splat
of a scalar value in the range accepted by the instruction's immediate form
([-16, 15] for the signed comparisons, [0, 127] for the unsigned ones), and
convert them to the immediate variant of the instruction.
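
For example (an illustration taken directly from the tests below), a wide
compare of i8 elements against a 64-bit splat of 4:

  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef,
                         <vscale x 2 x i32> zeroinitializer
  %out = call <vscale x 16 x i1>
      @llvm.aarch64.sve.cmpeq.wide.nxv16i8(<vscale x 16 x i1> %pg,
                                           <vscale x 16 x i8> %a,
                                           <vscale x 2 x i64> %splat)

now selects the immediate form directly:

  cmpeq p0.b, p0/z, z0.b, #4

rather than materializing the splat in a vector register. Note that the
lt/le/lo/ls wide compares are rewritten as the converse comparison
(gt/ge/hi/hs) with the operands swapped (the Invert flag below), and
instruction selection still emits the expected instruction, as the
cmplt/cmple/cmplo/cmpls tests show.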

Patch by Graham Hunter

Reviewers: sdesmalen, efriedma, dancgr, rovka, rengolin

Reviewed By: efriedma

Subscribers: tschuett, kristof.beyls, hiraditya, rkruppe, psnobl,
llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D71009

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
    llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index b42496abecb6..5e5ef962e2e4 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -10560,6 +10560,77 @@ static SDValue LowerSVEIntReduction(SDNode *N, unsigned Opc,
   return SDValue();
 }
 
+static SDValue tryConvertSVEWideCompare(SDNode *N, unsigned ReplacementIID,
+                                        bool Invert,
+                                        TargetLowering::DAGCombinerInfo &DCI,
+                                        SelectionDAG &DAG) {
+  if (DCI.isBeforeLegalize())
+    return SDValue();
+
+  SDValue Comparator = N->getOperand(3);
+  if (Comparator.getOpcode() == AArch64ISD::DUP ||
+      Comparator.getOpcode() == ISD::SPLAT_VECTOR) {
+    unsigned IID = getIntrinsicID(N);
+    EVT VT = N->getValueType(0);
+    EVT CmpVT = N->getOperand(2).getValueType();
+    SDValue Pred = N->getOperand(1);
+    SDValue Imm;
+    SDLoc DL(N);
+
+    switch (IID) {
+    default:
+      llvm_unreachable("Called with wrong intrinsic!");
+      break;
+
+    // Signed comparisons
+    case Intrinsic::aarch64_sve_cmpeq_wide:
+    case Intrinsic::aarch64_sve_cmpne_wide:
+    case Intrinsic::aarch64_sve_cmpge_wide:
+    case Intrinsic::aarch64_sve_cmpgt_wide:
+    case Intrinsic::aarch64_sve_cmplt_wide:
+    case Intrinsic::aarch64_sve_cmple_wide: {
+      if (auto *CN = dyn_cast<ConstantSDNode>(Comparator.getOperand(0))) {
+        int64_t ImmVal = CN->getSExtValue();
+        if (ImmVal >= -16 && ImmVal <= 15)
+          Imm = DAG.getConstant(ImmVal, DL, MVT::i32);
+        else
+          return SDValue();
+      }
+      break;
+    }
+    // Unsigned comparisons
+    case Intrinsic::aarch64_sve_cmphs_wide:
+    case Intrinsic::aarch64_sve_cmphi_wide:
+    case Intrinsic::aarch64_sve_cmplo_wide:
+    case Intrinsic::aarch64_sve_cmpls_wide: {
+      if (auto *CN = dyn_cast<ConstantSDNode>(Comparator.getOperand(0))) {
+        uint64_t ImmVal = CN->getZExtValue();
+        if (ImmVal <= 127)
+          Imm = DAG.getConstant(ImmVal, DL, MVT::i32);
+        else
+          return SDValue();
+      }
+      break;
+    }
+    }
+
+    SDValue Splat = DAG.getNode(ISD::SPLAT_VECTOR, DL, CmpVT, Imm);
+    SDValue ID = DAG.getTargetConstant(ReplacementIID, DL, MVT::i64);
+    SDValue Op0, Op1;
+    if (Invert) {
+      Op0 = Splat;
+      Op1 = N->getOperand(2);
+    } else {
+      Op0 = N->getOperand(2);
+      Op1 = Splat;
+    }
+    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
+                       ID, Pred, Op0, Op1);
+  }
+
+  return SDValue();
+}
+
 static SDValue performIntrinsicCombine(SDNode *N,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const AArch64Subtarget *Subtarget) {
@@ -10628,6 +10699,36 @@ static SDValue performIntrinsicCombine(SDNode *N,
     return LowerSVEIntReduction(N, AArch64ISD::EORV_PRED, DAG);
   case Intrinsic::aarch64_sve_andv:
     return LowerSVEIntReduction(N, AArch64ISD::ANDV_PRED, DAG);
+  case Intrinsic::aarch64_sve_cmpeq_wide:
+    return tryConvertSVEWideCompare(N, Intrinsic::aarch64_sve_cmpeq,
+                                    false, DCI, DAG);
+  case Intrinsic::aarch64_sve_cmpne_wide:
+    return tryConvertSVEWideCompare(N, Intrinsic::aarch64_sve_cmpne,
+                                    false, DCI, DAG);
+  case Intrinsic::aarch64_sve_cmpge_wide:
+    return tryConvertSVEWideCompare(N, Intrinsic::aarch64_sve_cmpge,
+                                    false, DCI, DAG);
+  case Intrinsic::aarch64_sve_cmpgt_wide:
+    return tryConvertSVEWideCompare(N, Intrinsic::aarch64_sve_cmpgt,
+                                    false, DCI, DAG);
+  case Intrinsic::aarch64_sve_cmplt_wide:
+    return tryConvertSVEWideCompare(N, Intrinsic::aarch64_sve_cmpgt,
+                                    true, DCI, DAG);
+  case Intrinsic::aarch64_sve_cmple_wide:
+    return tryConvertSVEWideCompare(N, Intrinsic::aarch64_sve_cmpge,
+                                    true, DCI, DAG);
+  case Intrinsic::aarch64_sve_cmphs_wide:
+    return tryConvertSVEWideCompare(N, Intrinsic::aarch64_sve_cmphs,
+                                    false, DCI, DAG);
+  case Intrinsic::aarch64_sve_cmphi_wide:
+    return tryConvertSVEWideCompare(N, Intrinsic::aarch64_sve_cmphi,
+                                    false, DCI, DAG);
+  case Intrinsic::aarch64_sve_cmplo_wide:
+    return tryConvertSVEWideCompare(N, Intrinsic::aarch64_sve_cmphi,
+                                    true, DCI, DAG);
+  case Intrinsic::aarch64_sve_cmpls_wide:
+    return tryConvertSVEWideCompare(N, Intrinsic::aarch64_sve_cmphs,
+                                    true, DCI, DAG);
   }
   return SDValue();
 }

diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll
index a86a97f5b5d7..9e3190aaa680 100644
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll
@@ -30,6 +30,18 @@ define <vscale x 16 x i1> @int_cmpeq_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
   ret <vscale x 16 x i1> %out
 }
 
+define <vscale x 16 x i1> @wide_cmpeq_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: wide_cmpeq_b
+; CHECK: cmpeq p0.b, p0/z, z0.b, #4
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                      <vscale x 16 x i8> %a,
+                                                                      <vscale x 2 x i64> %splat)
+  ret <vscale x 16 x i1> %out
+}
+
 define <vscale x 8 x i1> @ir_cmpeq_h(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: ir_cmpeq_h
 ; CHECK: cmpeq p0.h, p0/z, z0.h, #-16
@@ -52,6 +64,18 @@ define <vscale x 8 x i1> @int_cmpeq_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
   ret <vscale x 8 x i1> %out
 }
 
+define <vscale x 8 x i1> @wide_cmpeq_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: wide_cmpeq_h
+; CHECK: cmpeq p0.h, p0/z, z0.h, #-16
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 -16, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv8i16(<vscale x 8 x i1> %pg,
+                                                                     <vscale x 8 x i16> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 8 x i1> %out
+}
+
 define <vscale x 4 x i1> @ir_cmpeq_s(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: ir_cmpeq_s
 ; CHECK: cmpeq p0.s, p0/z, z0.s, #15
@@ -74,6 +98,18 @@ define <vscale x 4 x i1> @int_cmpeq_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
   ret <vscale x 4 x i1> %out
 }
 
+define <vscale x 4 x i1> @wide_cmpeq_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: wide_cmpeq_s
+; CHECK: cmpeq p0.s, p0/z, z0.s, #15
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                     <vscale x 4 x i32> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 4 x i1> %out
+}
+
 define <vscale x 2 x i1> @ir_cmpeq_d(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: ir_cmpeq_d
 ; CHECK: cmpeq p0.d, p0/z, z0.d, #0
@@ -122,6 +158,18 @@ define <vscale x 16 x i1> @int_cmpge_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
   ret <vscale x 16 x i1> %out
 }
 
+define <vscale x 16 x i1> @wide_cmpge_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: wide_cmpge_b
+; CHECK: cmpge p0.b, p0/z, z0.b, #4
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.wide.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                      <vscale x 16 x i8> %a,
+                                                                      <vscale x 2 x i64> %splat)
+  ret <vscale x 16 x i1> %out
+}
+
 define <vscale x 8 x i1> @ir_cmpge_h(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: ir_cmpge_h
 ; CHECK: cmpge p0.h, p0/z, z0.h, #-16
@@ -144,6 +192,18 @@ define <vscale x 8 x i1> @int_cmpge_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
   ret <vscale x 8 x i1> %out
 }
 
+define <vscale x 8 x i1> @wide_cmpge_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: wide_cmpge_h
+; CHECK: cmpge p0.h, p0/z, z0.h, #-16
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 -16, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.wide.nxv8i16(<vscale x 8 x i1> %pg,
+                                                                     <vscale x 8 x i16> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 8 x i1> %out
+}
+
 define <vscale x 4 x i1> @ir_cmpge_s(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: ir_cmpge_s
 ; CHECK: cmpge p0.s, p0/z, z0.s, #15
@@ -166,6 +226,18 @@ define <vscale x 4 x i1> @int_cmpge_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
   ret <vscale x 4 x i1> %out
 }
 
+define <vscale x 4 x i1> @wide_cmpge_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: wide_cmpge_s
+; CHECK: cmpge p0.s, p0/z, z0.s, #15
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                     <vscale x 4 x i32> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 4 x i1> %out
+}
+
 define <vscale x 2 x i1> @ir_cmpge_d(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: ir_cmpge_d
 ; CHECK: cmpge p0.d, p0/z, z0.d, #0
@@ -214,6 +286,18 @@ define <vscale x 16 x i1> @int_cmpgt_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
   ret <vscale x 16 x i1> %out
 }
 
+define <vscale x 16 x i1> @wide_cmpgt_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: wide_cmpgt_b
+; CHECK: cmpgt p0.b, p0/z, z0.b, #4
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                      <vscale x 16 x i8> %a,
+                                                                      <vscale x 2 x i64> %splat)
+  ret <vscale x 16 x i1> %out
+}
+
 define <vscale x 8 x i1> @ir_cmpgt_h(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: ir_cmpgt_h
 ; CHECK: cmpgt p0.h, p0/z, z0.h, #-16
@@ -236,6 +320,18 @@ define <vscale x 8 x i1> @int_cmpgt_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
   ret <vscale x 8 x i1> %out
 }
 
+define <vscale x 8 x i1> @wide_cmpgt_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: wide_cmpgt_h
+; CHECK: cmpgt p0.h, p0/z, z0.h, #-16
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 -16, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv8i16(<vscale x 8 x i1> %pg,
+                                                                     <vscale x 8 x i16> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 8 x i1> %out
+}
+
 define <vscale x 4 x i1> @ir_cmpgt_s(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: ir_cmpgt_s
 ; CHECK: cmpgt p0.s, p0/z, z0.s, #15
@@ -258,6 +354,18 @@ define <vscale x 4 x i1> @int_cmpgt_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
   ret <vscale x 4 x i1> %out
 }
 
+define <vscale x 4 x i1> @wide_cmpgt_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: wide_cmpgt_s
+; CHECK: cmpgt p0.s, p0/z, z0.s, #15
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                     <vscale x 4 x i32> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 4 x i1> %out
+}
+
 define <vscale x 2 x i1> @ir_cmpgt_d(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: ir_cmpgt_d
 ; CHECK: cmpgt p0.d, p0/z, z0.d, #0
@@ -306,6 +414,18 @@ define <vscale x 16 x i1> @int_cmple_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
   ret <vscale x 16 x i1> %out
 }
 
+define <vscale x 16 x i1> @wide_cmple_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: wide_cmple_b
+; CHECK: cmple p0.b, p0/z, z0.b, #4
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmple.wide.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                      <vscale x 16 x i8> %a,
+                                                                      <vscale x 2 x i64> %splat)
+  ret <vscale x 16 x i1> %out
+}
+
 define <vscale x 8 x i1> @ir_cmple_h(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: ir_cmple_h
 ; CHECK: cmple p0.h, p0/z, z0.h, #-16
@@ -328,6 +448,18 @@ define <vscale x 8 x i1> @int_cmple_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
   ret <vscale x 8 x i1> %out
 }
 
+define <vscale x 8 x i1> @wide_cmple_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: wide_cmple_h
+; CHECK: cmple p0.h, p0/z, z0.h, #-16
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 -16, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmple.wide.nxv8i16(<vscale x 8 x i1> %pg,
+                                                                     <vscale x 8 x i16> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 8 x i1> %out
+}
+
 define <vscale x 4 x i1> @ir_cmple_s(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: ir_cmple_s
 ; CHECK: cmple p0.s, p0/z, z0.s, #15
@@ -350,6 +482,18 @@ define <vscale x 4 x i1> @int_cmple_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
   ret <vscale x 4 x i1> %out
 }
 
+define <vscale x 4 x i1> @wide_cmple_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: wide_cmple_s
+; CHECK: cmple p0.s, p0/z, z0.s, #15
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                     <vscale x 4 x i32> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 4 x i1> %out
+}
+
 define <vscale x 2 x i1> @ir_cmple_d(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: ir_cmple_d
 ; CHECK: cmple p0.d, p0/z, z0.d, #0
@@ -398,6 +542,18 @@ define <vscale x 16 x i1> @int_cmplt_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
   ret <vscale x 16 x i1> %out
 }
 
+define <vscale x 16 x i1> @wide_cmplt_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: wide_cmplt_b
+; CHECK: cmplt p0.b, p0/z, z0.b, #4
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmplt.wide.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                      <vscale x 16 x i8> %a,
+                                                                      <vscale x 2 x i64> %splat)
+  ret <vscale x 16 x i1> %out
+}
+
 define <vscale x 8 x i1> @ir_cmplt_h(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: ir_cmplt_h
 ; CHECK: cmplt p0.h, p0/z, z0.h, #-16
@@ -420,6 +576,18 @@ define <vscale x 8 x i1> @int_cmplt_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
   ret <vscale x 8 x i1> %out
 }
 
+define <vscale x 8 x i1> @wide_cmplt_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: wide_cmplt_h
+; CHECK: cmplt p0.h, p0/z, z0.h, #-16
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 -16, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1> %pg,
+                                                                     <vscale x 8 x i16> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 8 x i1> %out
+}
+
 define <vscale x 4 x i1> @ir_cmplt_s(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: ir_cmplt_s
 ; CHECK: cmplt p0.s, p0/z, z0.s, #15
@@ -442,6 +610,18 @@ define <vscale x 4 x i1> @int_cmplt_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
   ret <vscale x 4 x i1> %out
 }
 
+define <vscale x 4 x i1> @wide_cmplt_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: wide_cmplt_s
+; CHECK: cmplt p0.s, p0/z, z0.s, #15
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                     <vscale x 4 x i32> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 4 x i1> %out
+}
+
 define <vscale x 2 x i1> @ir_cmplt_d(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: ir_cmplt_d
 ; CHECK: cmplt p0.d, p0/z, z0.d, #0
@@ -490,6 +670,18 @@ define <vscale x 16 x i1> @int_cmpne_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
   ret <vscale x 16 x i1> %out
 }
 
+define <vscale x 16 x i1> @wide_cmpne_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: wide_cmpne_b
+; CHECK: cmpne p0.b, p0/z, z0.b, #4
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                      <vscale x 16 x i8> %a,
+                                                                      <vscale x 2 x i64> %splat)
+  ret <vscale x 16 x i1> %out
+}
+
 define <vscale x 8 x i1> @ir_cmpne_h(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: ir_cmpne_h
 ; CHECK: cmpne p0.h, p0/z, z0.h, #-16
@@ -512,6 +704,18 @@ define <vscale x 8 x i1> @int_cmpne_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
   ret <vscale x 8 x i1> %out
 }
 
+define <vscale x 8 x i1> @wide_cmpne_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: wide_cmpne_h
+; CHECK: cmpne p0.h, p0/z, z0.h, #-16
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 -16, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1> %pg,
+                                                                     <vscale x 8 x i16> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 8 x i1> %out
+}
+
 define <vscale x 4 x i1> @ir_cmpne_s(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: ir_cmpne_s
 ; CHECK: cmpne p0.s, p0/z, z0.s, #15
@@ -534,6 +738,18 @@ define <vscale x 4 x i1> @int_cmpne_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
   ret <vscale x 4 x i1> %out
 }
 
+define <vscale x 4 x i1> @wide_cmpne_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: wide_cmpne_s
+; CHECK: cmpne p0.s, p0/z, z0.s, #15
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                     <vscale x 4 x i32> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 4 x i1> %out
+}
+
 define <vscale x 2 x i1> @ir_cmpne_d(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: ir_cmpne_d
 ; CHECK: cmpne p0.d, p0/z, z0.d, #0
@@ -586,6 +802,18 @@ define <vscale x 16 x i1> @int_cmphi_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
   ret <vscale x 16 x i1> %out
 }
 
+define <vscale x 16 x i1> @wide_cmphi_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: wide_cmphi_b
+; CHECK: cmphi p0.b, p0/z, z0.b, #4
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.wide.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                      <vscale x 16 x i8> %a,
+                                                                      <vscale x 2 x i64> %splat)
+  ret <vscale x 16 x i1> %out
+}
+
 define <vscale x 8 x i1> @ir_cmphi_h(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: ir_cmphi_h
 ; CHECK: cmphi p0.h, p0/z, z0.h, #0
@@ -608,6 +836,18 @@ define <vscale x 8 x i1> @int_cmphi_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
   ret <vscale x 8 x i1> %out
 }
 
+define <vscale x 8 x i1> @wide_cmphi_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: wide_cmphi_h
+; CHECK: cmphi p0.h, p0/z, z0.h, #0
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.wide.nxv8i16(<vscale x 8 x i1> %pg,
+                                                                     <vscale x 8 x i16> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 8 x i1> %out
+}
+
 define <vscale x 4 x i1> @ir_cmphi_s(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: ir_cmphi_s
 ; CHECK: cmphi p0.s, p0/z, z0.s, #68
@@ -630,6 +870,18 @@ define <vscale x 4 x i1> @int_cmphi_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
   ret <vscale x 4 x i1> %out
 }
 
+define <vscale x 4 x i1> @wide_cmphi_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: wide_cmphi_s
+; CHECK: cmphi p0.s, p0/z, z0.s, #68
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 68, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                     <vscale x 4 x i32> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 4 x i1> %out
+}
+
 define <vscale x 2 x i1> @ir_cmphi_d(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: ir_cmphi_d
 ; CHECK: cmphi p0.d, p0/z, z0.d, #127
@@ -678,6 +930,18 @@ define <vscale x 16 x i1> @int_cmphs_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
   ret <vscale x 16 x i1> %out
 }
 
+define <vscale x 16 x i1> @wide_cmphs_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: wide_cmphs_b
+; CHECK: cmphs p0.b, p0/z, z0.b, #4
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.wide.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                      <vscale x 16 x i8> %a,
+                                                                      <vscale x 2 x i64> %splat)
+  ret <vscale x 16 x i1> %out
+}
+
 define <vscale x 8 x i1> @ir_cmphs_h(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: ir_cmphs_h
 ; CHECK: cmphs p0.h, p0/z, z0.h, #0
@@ -700,6 +964,18 @@ define <vscale x 8 x i1> @int_cmphs_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
   ret <vscale x 8 x i1> %out
 }
 
+define <vscale x 8 x i1> @wide_cmphs_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: wide_cmphs_h
+; CHECK: cmphs p0.h, p0/z, z0.h, #0
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.wide.nxv8i16(<vscale x 8 x i1> %pg,
+                                                                     <vscale x 8 x i16> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 8 x i1> %out
+}
+
 define <vscale x 4 x i1> @ir_cmphs_s(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: ir_cmphs_s
 ; CHECK: cmphs p0.s, p0/z, z0.s, #68
@@ -722,6 +998,18 @@ define <vscale x 4 x i1> @int_cmphs_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
   ret <vscale x 4 x i1> %out
 }
 
+define <vscale x 4 x i1> @wide_cmphs_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: wide_cmphs_s
+; CHECK: cmphs p0.s, p0/z, z0.s, #68
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 68, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                     <vscale x 4 x i32> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 4 x i1> %out
+}
+
 define <vscale x 2 x i1> @ir_cmphs_d(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: ir_cmphs_d
 ; CHECK: cmphs p0.d, p0/z, z0.d, #127
@@ -770,6 +1058,28 @@ define <vscale x 16 x i1> @int_cmplo_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
   ret <vscale x 16 x i1> %out
 }
 
+define <vscale x 16 x i1> @wide_cmplo_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: wide_cmplo_b
+; CHECK: cmplo p0.b, p0/z, z0.b, #4
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmplo.wide.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                      <vscale x 16 x i8> %a,
+                                                                      <vscale x 2 x i64> %splat)
+  ret <vscale x 16 x i1> %out
+}
+
+define <vscale x 8 x i1> @ir_cmplo_h(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: ir_cmplo_h
+; CHECK: cmplo p0.h, p0/z, z0.h, #0
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = icmp ult <vscale x 8 x i16> %a, %splat
+  ret <vscale x 8 x i1> %out
+}
+
 define <vscale x 8 x i1> @int_cmplo_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
 ; CHECK-LABEL: int_cmplo_h
 ; CHECK: cmplo p0.h, p0/z, z0.h, #0
@@ -782,6 +1092,18 @@ define <vscale x 8 x i1> @int_cmplo_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
   ret <vscale x 8 x i1> %out
 }
 
+define <vscale x 8 x i1> @wide_cmplo_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: wide_cmplo_h
+; CHECK: cmplo p0.h, p0/z, z0.h, #0
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmplo.wide.nxv8i16(<vscale x 8 x i1> %pg,
+                                                                     <vscale x 8 x i16> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 8 x i1> %out
+}
+
 define <vscale x 4 x i1> @ir_cmplo_s(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: ir_cmplo_s
 ; CHECK: cmplo p0.s, p0/z, z0.s, #68
@@ -804,6 +1126,18 @@ define <vscale x 4 x i1> @int_cmplo_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
   ret <vscale x 4 x i1> %out
 }
 
+define <vscale x 4 x i1> @wide_cmplo_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: wide_cmplo_s
+; CHECK: cmplo p0.s, p0/z, z0.s, #68
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 68, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                     <vscale x 4 x i32> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 4 x i1> %out
+}
+
 define <vscale x 2 x i1> @ir_cmplo_d(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: ir_cmplo_d
 ; CHECK: cmplo p0.d, p0/z, z0.d, #127
@@ -852,6 +1186,18 @@ define <vscale x 16 x i1> @int_cmpls_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8
   ret <vscale x 16 x i1> %out
 }
 
+define <vscale x 16 x i1> @wide_cmpls_b(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) {
+; CHECK-LABEL: wide_cmpls_b
+; CHECK: cmpls p0.b, p0/z, z0.b, #4
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 4, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.cmpls.wide.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                      <vscale x 16 x i8> %a,
+                                                                      <vscale x 2 x i64> %splat)
+  ret <vscale x 16 x i1> %out
+}
+
 define <vscale x 8 x i1> @ir_cmpls_h(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: ir_cmpls_h
 ; CHECK: cmpls p0.h, p0/z, z0.h, #0
@@ -874,6 +1220,18 @@ define <vscale x 8 x i1> @int_cmpls_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16>
   ret <vscale x 8 x i1> %out
 }
 
+define <vscale x 8 x i1> @wide_cmpls_h(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) {
+; CHECK-LABEL: wide_cmpls_h
+; CHECK: cmpls p0.h, p0/z, z0.h, #0
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 0, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.cmpls.wide.nxv8i16(<vscale x 8 x i1> %pg,
+                                                                     <vscale x 8 x i16> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 8 x i1> %out
+}
+
 define <vscale x 4 x i1> @ir_cmpls_s(<vscale x 4 x i32> %a) {
 ; CHECK-LABEL: ir_cmpls_s
 ; CHECK: cmpls p0.s, p0/z, z0.s, #68
@@ -896,6 +1254,18 @@ define <vscale x 4 x i1> @int_cmpls_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
   ret <vscale x 4 x i1> %out
 }
 
+define <vscale x 4 x i1> @wide_cmpls_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
+; CHECK-LABEL: wide_cmpls_s
+; CHECK: cmpls p0.s, p0/z, z0.s, #68
+; CHECK-NEXT: ret
+  %elt   = insertelement <vscale x 2 x i64> undef, i64 68, i32 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                     <vscale x 4 x i32> %a,
+                                                                     <vscale x 2 x i64> %splat)
+  ret <vscale x 4 x i1> %out
+}
+
 define <vscale x 2 x i1> @ir_cmpls_d(<vscale x 2 x i64> %a) {
 ; CHECK-LABEL: ir_cmpls_d
 ; CHECK: cmpls p0.d, p0/z, z0.d, #127
@@ -922,28 +1292,62 @@ declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.nxv16i8(<vscale x 16 x i1>, <
 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
 declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
 
 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
 declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpge.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpge.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
 
 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
 declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
 
 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
 declare <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.cmphi.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.cmphi.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
 
 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
 declare <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.cmphs.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.cmphs.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.cmple.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.cmple.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.cmplo.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.cmplo.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpls.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpls.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.cmplt.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.cmplt.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
 
 declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
 declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
 declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
 declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 16 x i1> @llvm.aarch64.sve.cmpne.wide.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 2 x i64>)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.cmpne.wide.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 2 x i64>)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>)
