[llvm-branch-commits] [llvm] 63e6bba - Revert "[LoongArch] Support CTLZ with lsx/lasx"

via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Mon Dec 4 11:02:24 PST 2023


Author: wanglei
Date: 2023-12-02T17:17:48+08:00
New Revision: 63e6bba0c322b6c07a4c66f09e6fd84e786248e6

URL: https://github.com/llvm/llvm-project/commit/63e6bba0c322b6c07a4c66f09e6fd84e786248e6
DIFF: https://github.com/llvm/llvm-project/commit/63e6bba0c322b6c07a4c66f09e6fd84e786248e6.diff

LOG: Revert "[LoongArch] Support CTLZ with lsx/lasx"

This reverts commit 07cec73dcd095035257eec1f213d273b10988130.

Added: 
    

Modified: 
    llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
    llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
    llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td

Removed: 
    llvm/test/CodeGen/LoongArch/lasx/ctpop-ctlz.ll
    llvm/test/CodeGen/LoongArch/lsx/ctpop-ctlz.ll


################################################################################
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 9f8b2fd07a48d..60e692fa4c1d0 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -256,7 +256,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
                          VT, Legal);
       setOperationAction({ISD::AND, ISD::OR, ISD::XOR}, VT, Legal);
       setOperationAction({ISD::SHL, ISD::SRA, ISD::SRL}, VT, Legal);
-      setOperationAction({ISD::CTPOP, ISD::CTLZ}, VT, Legal);
+      setOperationAction(ISD::CTPOP, VT, Legal);
     }
     for (MVT VT : {MVT::v4f32, MVT::v2f64}) {
       setOperationAction({ISD::FADD, ISD::FSUB}, VT, Legal);
@@ -286,7 +286,7 @@ LoongArchTargetLowering::LoongArchTargetLowering(const TargetMachine &TM,
                          VT, Legal);
       setOperationAction({ISD::AND, ISD::OR, ISD::XOR}, VT, Legal);
       setOperationAction({ISD::SHL, ISD::SRA, ISD::SRL}, VT, Legal);
-      setOperationAction({ISD::CTPOP, ISD::CTLZ}, VT, Legal);
+      setOperationAction(ISD::CTPOP, VT, Legal);
     }
     for (MVT VT : {MVT::v8f32, MVT::v4f64}) {
       setOperationAction({ISD::FADD, ISD::FSUB}, VT, Legal);
@@ -2827,15 +2827,6 @@ performINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
   case Intrinsic::loongarch_lasx_xvsrai_d:
     return DAG.getNode(ISD::SRA, DL, N->getValueType(0), N->getOperand(1),
                        lowerVectorSplatImm<6>(N, 2, DAG));
-  case Intrinsic::loongarch_lsx_vclz_b:
-  case Intrinsic::loongarch_lsx_vclz_h:
-  case Intrinsic::loongarch_lsx_vclz_w:
-  case Intrinsic::loongarch_lsx_vclz_d:
-  case Intrinsic::loongarch_lasx_xvclz_b:
-  case Intrinsic::loongarch_lasx_xvclz_h:
-  case Intrinsic::loongarch_lasx_xvclz_w:
-  case Intrinsic::loongarch_lasx_xvclz_d:
-    return DAG.getNode(ISD::CTLZ, DL, N->getValueType(0), N->getOperand(1));
   case Intrinsic::loongarch_lsx_vpcnt_b:
   case Intrinsic::loongarch_lsx_vpcnt_h:
   case Intrinsic::loongarch_lsx_vpcnt_w:

diff --git a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
index 960ac627578cd..a5652472481ac 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLASXInstrInfo.td
@@ -1273,9 +1273,6 @@ defm : PatXrXr<sra, "XVSRA">;
 defm : PatShiftXrXr<sra, "XVSRA">;
 defm : PatShiftXrUimm<sra, "XVSRAI">;
 
-// XVCLZ_{B/H/W/D}
-defm : PatXr<ctlz, "XVCLZ">;
-
 // XVPCNT_{B/H/W/D}
 defm : PatXr<ctpop, "XVPCNT">;
 
@@ -1593,26 +1590,26 @@ foreach Inst = ["XVMADDWEV_Q_D", "XVMADDWOD_Q_D", "XVMADDWEV_Q_DU",
 //     (LAInst vty:$xj)>;
 foreach Inst = ["XVEXTH_H_B", "XVEXTH_HU_BU",
                 "XVMSKLTZ_B", "XVMSKGEZ_B", "XVMSKNZ_B",
-                "XVCLO_B", "VEXT2XV_H_B", "VEXT2XV_HU_BU",
+                "XVCLO_B", "XVCLZ_B", "VEXT2XV_H_B", "VEXT2XV_HU_BU",
                 "VEXT2XV_W_B", "VEXT2XV_WU_BU", "VEXT2XV_D_B",
                 "VEXT2XV_DU_BU", "XVREPLVE0_B", "XVREPLVE0_Q"] in
   def : Pat<(deriveLASXIntrinsic<Inst>.ret (v32i8 LASX256:$xj)),
             (!cast<LAInst>(Inst) LASX256:$xj)>;
 foreach Inst = ["XVEXTH_W_H", "XVEXTH_WU_HU", "XVMSKLTZ_H",
-                "XVCLO_H", "XVFCVTL_S_H", "XVFCVTH_S_H",
+                "XVCLO_H", "XVCLZ_H", "XVFCVTL_S_H", "XVFCVTH_S_H",
                 "VEXT2XV_W_H", "VEXT2XV_WU_HU", "VEXT2XV_D_H",
                 "VEXT2XV_DU_HU", "XVREPLVE0_H"] in
   def : Pat<(deriveLASXIntrinsic<Inst>.ret (v16i16 LASX256:$xj)),
             (!cast<LAInst>(Inst) LASX256:$xj)>;
 foreach Inst = ["XVEXTH_D_W", "XVEXTH_DU_WU", "XVMSKLTZ_W",
-                "XVCLO_W", "XVFFINT_S_W", "XVFFINT_S_WU",
+                "XVCLO_W", "XVCLZ_W", "XVFFINT_S_W", "XVFFINT_S_WU",
                 "XVFFINTL_D_W", "XVFFINTH_D_W",
                 "VEXT2XV_D_W", "VEXT2XV_DU_WU", "XVREPLVE0_W"] in
   def : Pat<(deriveLASXIntrinsic<Inst>.ret (v8i32 LASX256:$xj)),
             (!cast<LAInst>(Inst) LASX256:$xj)>;
 foreach Inst = ["XVEXTH_Q_D", "XVEXTH_QU_DU", "XVMSKLTZ_D",
                 "XVEXTL_Q_D", "XVEXTL_QU_DU",
-                "XVCLO_D", "XVFFINT_D_L", "XVFFINT_D_LU",
+                "XVCLO_D", "XVCLZ_D", "XVFFINT_D_L", "XVFFINT_D_LU",
                 "XVREPLVE0_D"] in
   def : Pat<(deriveLASXIntrinsic<Inst>.ret (v4i64 LASX256:$xj)),
             (!cast<LAInst>(Inst) LASX256:$xj)>;

diff --git a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
index 3480ade9eebf9..5645ce51194ac 100644
--- a/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
+++ b/llvm/lib/Target/LoongArch/LoongArchLSXInstrInfo.td
@@ -1350,9 +1350,6 @@ defm : PatVrVr<sra, "VSRA">;
 defm : PatShiftVrVr<sra, "VSRA">;
 defm : PatShiftVrUimm<sra, "VSRAI">;
 
-// VCLZ_{B/H/W/D}
-defm : PatVr<ctlz, "VCLZ">;
-
 // VPCNT_{B/H/W/D}
 defm : PatVr<ctpop, "VPCNT">;
 
@@ -1677,21 +1674,21 @@ foreach Inst = ["VMADDWEV_Q_D", "VMADDWOD_Q_D", "VMADDWEV_Q_DU",
 //     (LAInst vty:$vj)>;
 foreach Inst = ["VEXTH_H_B", "VEXTH_HU_BU",
                 "VMSKLTZ_B", "VMSKGEZ_B", "VMSKNZ_B",
-                "VCLO_B"] in
+                "VCLO_B", "VCLZ_B"] in
   def : Pat<(deriveLSXIntrinsic<Inst>.ret (v16i8 LSX128:$vj)),
             (!cast<LAInst>(Inst) LSX128:$vj)>;
 foreach Inst = ["VEXTH_W_H", "VEXTH_WU_HU", "VMSKLTZ_H",
-                "VCLO_H", "VFCVTL_S_H", "VFCVTH_S_H"] in
+                "VCLO_H", "VCLZ_H", "VFCVTL_S_H", "VFCVTH_S_H"] in
   def : Pat<(deriveLSXIntrinsic<Inst>.ret (v8i16 LSX128:$vj)),
             (!cast<LAInst>(Inst) LSX128:$vj)>;
 foreach Inst = ["VEXTH_D_W", "VEXTH_DU_WU", "VMSKLTZ_W",
-                "VCLO_W", "VFFINT_S_W", "VFFINT_S_WU",
+                "VCLO_W", "VCLZ_W", "VFFINT_S_W", "VFFINT_S_WU",
                 "VFFINTL_D_W", "VFFINTH_D_W"] in
   def : Pat<(deriveLSXIntrinsic<Inst>.ret (v4i32 LSX128:$vj)),
             (!cast<LAInst>(Inst) LSX128:$vj)>;
 foreach Inst = ["VEXTH_Q_D", "VEXTH_QU_DU", "VMSKLTZ_D",
                 "VEXTL_Q_D", "VEXTL_QU_DU",
-                "VCLO_D", "VFFINT_D_L", "VFFINT_D_LU"] in
+                "VCLO_D", "VCLZ_D", "VFFINT_D_L", "VFFINT_D_LU"] in
   def : Pat<(deriveLSXIntrinsic<Inst>.ret (v2i64 LSX128:$vj)),
             (!cast<LAInst>(Inst) LSX128:$vj)>;
 

diff --git a/llvm/test/CodeGen/LoongArch/lasx/ctpop-ctlz.ll b/llvm/test/CodeGen/LoongArch/lasx/ctpop-ctlz.ll
deleted file mode 100644
index 05511b50e5052..0000000000000
--- a/llvm/test/CodeGen/LoongArch/lasx/ctpop-ctlz.ll
+++ /dev/null
@@ -1,151 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc --mtriple=loongarch64 --mattr=+lasx < %s | FileCheck %s
-
-define void @ctpop_v32i8(ptr %src, ptr %dst) nounwind {
-; CHECK-LABEL: ctpop_v32i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpcnt.b $xr0, $xr0
-; CHECK-NEXT:    xvst $xr0, $a1, 0
-; CHECK-NEXT:    ret
-  %v = load <32 x i8>, ptr %src
-  %res = call <32 x i8> @llvm.ctpop.v32i8(<32 x i8> %v)
-  store <32 x i8> %res, ptr %dst
-  ret void
-}
-
-define void @ctpop_v16i16(ptr %src, ptr %dst) nounwind {
-; CHECK-LABEL: ctpop_v16i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpcnt.h $xr0, $xr0
-; CHECK-NEXT:    xvst $xr0, $a1, 0
-; CHECK-NEXT:    ret
-  %v = load <16 x i16>, ptr %src
-  %res = call <16 x i16> @llvm.ctpop.v16i16(<16 x i16> %v)
-  store <16 x i16> %res, ptr %dst
-  ret void
-}
-
-define void @ctpop_v8i32(ptr %src, ptr %dst) nounwind {
-; CHECK-LABEL: ctpop_v8i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpcnt.w $xr0, $xr0
-; CHECK-NEXT:    xvst $xr0, $a1, 0
-; CHECK-NEXT:    ret
-  %v = load <8 x i32>, ptr %src
-  %res = call <8 x i32> @llvm.ctpop.v8i32(<8 x i32> %v)
-  store <8 x i32> %res, ptr %dst
-  ret void
-}
-
-define void @ctpop_v4i64(ptr %src, ptr %dst) nounwind {
-; CHECK-LABEL: ctpop_v4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvpcnt.d $xr0, $xr0
-; CHECK-NEXT:    xvst $xr0, $a1, 0
-; CHECK-NEXT:    ret
-  %v = load <4 x i64>, ptr %src
-  %res = call <4 x i64> @llvm.ctpop.v4i64(<4 x i64> %v)
-  store <4 x i64> %res, ptr %dst
-  ret void
-}
-
-define void @ctlz_v32i8(ptr %src, ptr %dst) nounwind {
-; CHECK-LABEL: ctlz_v32i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvsrli.b $xr1, $xr0, 1
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsrli.b $xr1, $xr0, 2
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsrli.b $xr1, $xr0, 4
-; CHECK-NEXT:    xvnor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpcnt.b $xr0, $xr0
-; CHECK-NEXT:    xvst $xr0, $a1, 0
-; CHECK-NEXT:    ret
-  %v = load <32 x i8>, ptr %src
-  %res = call <32 x i8> @llvm.ctlz.v32i8(<32 x i8> %v, i1 false)
-  store <32 x i8> %res, ptr %dst
-  ret void
-}
-
-define void @ctlz_v16i16(ptr %src, ptr %dst) nounwind {
-; CHECK-LABEL: ctlz_v16i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvsrli.h $xr1, $xr0, 1
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsrli.h $xr1, $xr0, 2
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsrli.h $xr1, $xr0, 4
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsrli.h $xr1, $xr0, 8
-; CHECK-NEXT:    xvnor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpcnt.h $xr0, $xr0
-; CHECK-NEXT:    xvst $xr0, $a1, 0
-; CHECK-NEXT:    ret
-  %v = load <16 x i16>, ptr %src
-  %res = call <16 x i16> @llvm.ctlz.v16i16(<16 x i16> %v, i1 false)
-  store <16 x i16> %res, ptr %dst
-  ret void
-}
-
-define void @ctlz_v8i32(ptr %src, ptr %dst) nounwind {
-; CHECK-LABEL: ctlz_v8i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvsrli.w $xr1, $xr0, 1
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsrli.w $xr1, $xr0, 2
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsrli.w $xr1, $xr0, 4
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsrli.w $xr1, $xr0, 8
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsrli.w $xr1, $xr0, 16
-; CHECK-NEXT:    xvnor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpcnt.w $xr0, $xr0
-; CHECK-NEXT:    xvst $xr0, $a1, 0
-; CHECK-NEXT:    ret
-  %v = load <8 x i32>, ptr %src
-  %res = call <8 x i32> @llvm.ctlz.v8i32(<8 x i32> %v, i1 false)
-  store <8 x i32> %res, ptr %dst
-  ret void
-}
-
-define void @ctlz_v4i64(ptr %src, ptr %dst) nounwind {
-; CHECK-LABEL: ctlz_v4i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    xvld $xr0, $a0, 0
-; CHECK-NEXT:    xvsrli.d $xr1, $xr0, 1
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsrli.d $xr1, $xr0, 2
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsrli.d $xr1, $xr0, 4
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsrli.d $xr1, $xr0, 8
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsrli.d $xr1, $xr0, 16
-; CHECK-NEXT:    xvor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvsrli.d $xr1, $xr0, 32
-; CHECK-NEXT:    xvnor.v $xr0, $xr0, $xr1
-; CHECK-NEXT:    xvpcnt.d $xr0, $xr0
-; CHECK-NEXT:    xvst $xr0, $a1, 0
-; CHECK-NEXT:    ret
-  %v = load <4 x i64>, ptr %src
-  %res = call <4 x i64> @llvm.ctlz.v4i64(<4 x i64> %v, i1 false)
-  store <4 x i64> %res, ptr %dst
-  ret void
-}
-
-declare <32 x i8> @llvm.ctpop.v32i8(<32 x i8>)
-declare <16 x i16> @llvm.ctpop.v16i16(<16 x i16>)
-declare <8 x i32> @llvm.ctpop.v8i32(<8 x i32>)
-declare <4 x i64> @llvm.ctpop.v4i64(<4 x i64>)
-declare <32 x i8> @llvm.ctlz.v32i8(<32 x i8>, i1)
-declare <16 x i16> @llvm.ctlz.v16i16(<16 x i16>, i1)
-declare <8 x i32> @llvm.ctlz.v8i32(<8 x i32>, i1)
-declare <4 x i64> @llvm.ctlz.v4i64(<4 x i64>, i1)

diff --git a/llvm/test/CodeGen/LoongArch/lsx/ctpop-ctlz.ll b/llvm/test/CodeGen/LoongArch/lsx/ctpop-ctlz.ll
deleted file mode 100644
index 8746766d6ba72..0000000000000
--- a/llvm/test/CodeGen/LoongArch/lsx/ctpop-ctlz.ll
+++ /dev/null
@@ -1,151 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s
-
-define void @ctpop_v16i8(ptr %src, ptr %dst) nounwind {
-; CHECK-LABEL: ctpop_v16i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vpcnt.b $vr0, $vr0
-; CHECK-NEXT:    vst $vr0, $a1, 0
-; CHECK-NEXT:    ret
-  %v = load <16 x i8>, ptr %src
-  %res = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %v)
-  store <16 x i8> %res, ptr %dst
-  ret void
-}
-
-define void @ctpop_v8i16(ptr %src, ptr %dst) nounwind {
-; CHECK-LABEL: ctpop_v8i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vpcnt.h $vr0, $vr0
-; CHECK-NEXT:    vst $vr0, $a1, 0
-; CHECK-NEXT:    ret
-  %v = load <8 x i16>, ptr %src
-  %res = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %v)
-  store <8 x i16> %res, ptr %dst
-  ret void
-}
-
-define void @ctpop_v4i32(ptr %src, ptr %dst) nounwind {
-; CHECK-LABEL: ctpop_v4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vpcnt.w $vr0, $vr0
-; CHECK-NEXT:    vst $vr0, $a1, 0
-; CHECK-NEXT:    ret
-  %v = load <4 x i32>, ptr %src
-  %res = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %v)
-  store <4 x i32> %res, ptr %dst
-  ret void
-}
-
-define void @ctpop_v2i64(ptr %src, ptr %dst) nounwind {
-; CHECK-LABEL: ctpop_v2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vpcnt.d $vr0, $vr0
-; CHECK-NEXT:    vst $vr0, $a1, 0
-; CHECK-NEXT:    ret
-  %v = load <2 x i64>, ptr %src
-  %res = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %v)
-  store <2 x i64> %res, ptr %dst
-  ret void
-}
-
-define void @ctlz_v16i8(ptr %src, ptr %dst) nounwind {
-; CHECK-LABEL: ctlz_v16i8:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vsrli.b $vr1, $vr0, 1
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.b $vr1, $vr0, 2
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.b $vr1, $vr0, 4
-; CHECK-NEXT:    vnor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vpcnt.b $vr0, $vr0
-; CHECK-NEXT:    vst $vr0, $a1, 0
-; CHECK-NEXT:    ret
-  %v = load <16 x i8>, ptr %src
-  %res = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %v, i1 false)
-  store <16 x i8> %res, ptr %dst
-  ret void
-}
-
-define void @ctlz_v8i16(ptr %src, ptr %dst) nounwind {
-; CHECK-LABEL: ctlz_v8i16:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vsrli.h $vr1, $vr0, 1
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.h $vr1, $vr0, 2
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.h $vr1, $vr0, 4
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.h $vr1, $vr0, 8
-; CHECK-NEXT:    vnor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vpcnt.h $vr0, $vr0
-; CHECK-NEXT:    vst $vr0, $a1, 0
-; CHECK-NEXT:    ret
-  %v = load <8 x i16>, ptr %src
-  %res = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %v, i1 false)
-  store <8 x i16> %res, ptr %dst
-  ret void
-}
-
-define void @ctlz_v4i32(ptr %src, ptr %dst) nounwind {
-; CHECK-LABEL: ctlz_v4i32:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vsrli.w $vr1, $vr0, 1
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.w $vr1, $vr0, 2
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.w $vr1, $vr0, 4
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.w $vr1, $vr0, 8
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.w $vr1, $vr0, 16
-; CHECK-NEXT:    vnor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vpcnt.w $vr0, $vr0
-; CHECK-NEXT:    vst $vr0, $a1, 0
-; CHECK-NEXT:    ret
-  %v = load <4 x i32>, ptr %src
-  %res = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %v, i1 false)
-  store <4 x i32> %res, ptr %dst
-  ret void
-}
-
-define void @ctlz_v2i64(ptr %src, ptr %dst) nounwind {
-; CHECK-LABEL: ctlz_v2i64:
-; CHECK:       # %bb.0:
-; CHECK-NEXT:    vld $vr0, $a0, 0
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 1
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 2
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 4
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 8
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 16
-; CHECK-NEXT:    vor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vsrli.d $vr1, $vr0, 32
-; CHECK-NEXT:    vnor.v $vr0, $vr0, $vr1
-; CHECK-NEXT:    vpcnt.d $vr0, $vr0
-; CHECK-NEXT:    vst $vr0, $a1, 0
-; CHECK-NEXT:    ret
-  %v = load <2 x i64>, ptr %src
-  %res = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %v, i1 false)
-  store <2 x i64> %res, ptr %dst
-  ret void
-}
-
-declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8>)
-declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16>)
-declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32>)
-declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
-declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8>, i1)
-declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16>, i1)
-declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1)
-declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1)


        


More information about the llvm-branch-commits mailing list