[llvm] b0db2db - [AArch64][SVEIntrinsicOpts] Optimize tbl+dup into dup+extractelement

Jun Ma via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 29 19:35:43 PDT 2021


Author: Jun Ma
Date: 2021-03-30T10:35:08+08:00
New Revision: b0db2dbc291f1fdc48b7e03c78a1dfdf26864c14

URL: https://github.com/llvm/llvm-project/commit/b0db2dbc291f1fdc48b7e03c78a1dfdf26864c14
DIFF: https://github.com/llvm/llvm-project/commit/b0db2dbc291f1fdc48b7e03c78a1dfdf26864c14.diff

LOG: [AArch64][SVEIntrinsicOpts] Optimize tbl+dup into dup+extractelement
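
For example (a sketch mirroring the dup_ext_i8 test added below; the
value names are illustrative), the input

  %idx = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 1)
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i8> %idx)

becomes a splat of the single extracted lane:

  %elt = extractelement <vscale x 16 x i8> %data, i8 1
  %ins = insertelement <vscale x 16 x i8> poison, i8 %elt, i32 0
  %out = shufflevector <vscale x 16 x i8> %ins, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer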

Differential Revision: https://reviews.llvm.org/D99412

Added: 
    llvm/test/CodeGen/AArch64/sve-tbl-dupx.ll

Modified: 
    llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp b/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp
index 6b8cb786bb6c5..e907400304606 100644
--- a/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp
+++ b/llvm/lib/Target/AArch64/SVEIntrinsicOpts.cpp
@@ -78,6 +78,7 @@ struct SVEIntrinsicOpts : public ModulePass {
   static bool optimizeConvertFromSVBool(IntrinsicInst *I);
   static bool optimizePTest(IntrinsicInst *I);
   static bool optimizeVectorMul(IntrinsicInst *I);
+  static bool optimizeTBL(IntrinsicInst *I);
 
   static bool processPhiNode(IntrinsicInst *I);
 };
@@ -437,6 +438,41 @@ bool SVEIntrinsicOpts::optimizeVectorMul(IntrinsicInst *I) {
   return Changed;
 }
 
+bool SVEIntrinsicOpts::optimizeTBL(IntrinsicInst *I) {
+  assert(I->getIntrinsicID() == Intrinsic::aarch64_sve_tbl &&
+         "Unexpected opcode");
+
+  auto *OpVal = I->getOperand(0);
+  auto *OpIndices = I->getOperand(1);
+  VectorType *VTy = cast<VectorType>(I->getType());
+
+  // Check whether OpIndices is an aarch64_sve_dup_x intrinsic call whose
+  // constant splat value is less than the minimal element count of the
+  // result vector type.
+  auto *DupXIntrI = dyn_cast<IntrinsicInst>(OpIndices);
+  if (!DupXIntrI || DupXIntrI->getIntrinsicID() != Intrinsic::aarch64_sve_dup_x)
+    return false;
+
+  auto *SplatValue = dyn_cast<ConstantInt>(DupXIntrI->getOperand(0));
+  if (!SplatValue ||
+      SplatValue->getValue().uge(VTy->getElementCount().getKnownMinValue()))
+    return false;
+
+  // Convert sve_tbl(OpVal, sve_dup_x(SplatValue)) to
+  // splat_vector(extractelement(OpVal, SplatValue)) for further optimization.
+  LLVMContext &Ctx = I->getContext();
+  IRBuilder<> Builder(Ctx);
+  Builder.SetInsertPoint(I);
+  auto *Extract = Builder.CreateExtractElement(OpVal, SplatValue);
+  auto *VectorSplat =
+      Builder.CreateVectorSplat(VTy->getElementCount(), Extract);
+
+  I->replaceAllUsesWith(VectorSplat);
+  I->eraseFromParent();
+  if (DupXIntrI->use_empty())
+    DupXIntrI->eraseFromParent();
+  return true;
+}
+
 bool SVEIntrinsicOpts::optimizeConvertFromSVBool(IntrinsicInst *I) {
   assert(I->getIntrinsicID() == Intrinsic::aarch64_sve_convert_from_svbool &&
          "Unexpected opcode");
@@ -507,6 +543,8 @@ bool SVEIntrinsicOpts::optimizeIntrinsic(Instruction *I) {
   case Intrinsic::aarch64_sve_ptest_first:
   case Intrinsic::aarch64_sve_ptest_last:
     return optimizePTest(IntrI);
+  case Intrinsic::aarch64_sve_tbl:
+    return optimizeTBL(IntrI);
   default:
     return false;
   }
@@ -560,6 +598,7 @@ bool SVEIntrinsicOpts::runOnModule(Module &M) {
     case Intrinsic::aarch64_sve_ptrue:
     case Intrinsic::aarch64_sve_mul:
     case Intrinsic::aarch64_sve_fmul:
+    case Intrinsic::aarch64_sve_tbl:
       for (User *U : F.users())
         Functions.insert(cast<Instruction>(U)->getFunction());
       break;

diff --git a/llvm/test/CodeGen/AArch64/sve-tbl-dupx.ll b/llvm/test/CodeGen/AArch64/sve-tbl-dupx.ll
new file mode 100644
index 0000000000000..1624380f5ba44
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve-tbl-dupx.ll
@@ -0,0 +1,104 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -aarch64-sve-intrinsic-opts -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
+; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t
+
+; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
+; WARN-NOT: warning
+
+; op2 = tbl(op1, dup_x(idx)) -> op2 = vector_splat(extractelement(op1, idx))
+
+define <vscale x 16 x i8> @dup_ext_i8(<vscale x 16 x i8> %data) {
+; CHECK-LABEL: @dup_ext_i8(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <vscale x 16 x i8> [[DATA:%.*]], i8 1
+; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[TMP1]], i32 0
+; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT:    ret <vscale x 16 x i8> [[DOTSPLAT]]
+;
+  %tmp = call <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8 1)
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i8> %tmp)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @dup_ext_i16(<vscale x 8 x i16> %data) {
+; CHECK-LABEL: @dup_ext_i16(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <vscale x 8 x i16> [[DATA:%.*]], i16 1
+; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[TMP1]], i32 0
+; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT:    ret <vscale x 8 x i16> [[DOTSPLAT]]
+;
+  %tmp = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 1)
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.tbl.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i16> %tmp)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @dup_ext_i32(<vscale x 4 x i32> %data) {
+; CHECK-LABEL: @dup_ext_i32(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <vscale x 4 x i32> [[DATA:%.*]], i32 1
+; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[TMP1]], i32 0
+; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT:    ret <vscale x 4 x i32> [[DOTSPLAT]]
+;
+  %tmp = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 1)
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i32> %tmp)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @dup_ext_i64(<vscale x 2 x i64> %data) {
+; CHECK-LABEL: @dup_ext_i64(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <vscale x 2 x i64> [[DATA:%.*]], i64 1
+; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP1]], i32 0
+; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT:    ret <vscale x 2 x i64> [[DOTSPLAT]]
+;
+  %tmp = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 1)
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.tbl.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i64> %tmp)
+  ret <vscale x 2 x i64> %out
+}
+
+define <vscale x 8 x half> @dup_ext_f16(<vscale x 8 x half> %data) {
+; CHECK-LABEL: @dup_ext_f16(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <vscale x 8 x half> [[DATA:%.*]], i16 1
+; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x half> poison, half [[TMP1]], i32 0
+; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x half> [[DOTSPLATINSERT]], <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
+; CHECK-NEXT:    ret <vscale x 8 x half> [[DOTSPLAT]]
+;
+  %tmp = call <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16 1)
+  %out = call <vscale x 8 x half> @llvm.aarch64.sve.tbl.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i16> %tmp)
+  ret <vscale x 8 x half> %out
+}
+
+define <vscale x 4 x float> @dup_ext_f32(<vscale x 4 x float> %data) {
+; CHECK-LABEL: @dup_ext_f32(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <vscale x 4 x float> [[DATA:%.*]], i32 1
+; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[TMP1]], i32 0
+; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x float> [[DOTSPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT:    ret <vscale x 4 x float> [[DOTSPLAT]]
+;
+  %tmp = call <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32 1)
+  %out = call <vscale x 4 x float> @llvm.aarch64.sve.tbl.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i32> %tmp)
+  ret <vscale x 4 x float> %out
+}
+
+define <vscale x 2 x double> @dup_ext_f64(<vscale x 2 x double> %data) {
+; CHECK-LABEL: @dup_ext_f64(
+; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <vscale x 2 x double> [[DATA:%.*]], i64 1
+; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x double> poison, double [[TMP1]], i32 0
+; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x double> [[DOTSPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
+; CHECK-NEXT:    ret <vscale x 2 x double> [[DOTSPLAT]]
+;
+  %tmp = call <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64 1)
+  %out = call <vscale x 2 x double> @llvm.aarch64.sve.tbl.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i64> %tmp)
+  ret <vscale x 2 x double> %out
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.dup.x.nxv16i8(i8)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.dup.x.nxv8i16(i16)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.dup.x.nxv4i32(i32)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.dup.x.nxv2i64(i64)
+declare <vscale x 16 x i8> @llvm.aarch64.sve.tbl.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.tbl.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.tbl.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.tbl.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 8 x half> @llvm.aarch64.sve.tbl.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i16>)
+declare <vscale x 4 x float> @llvm.aarch64.sve.tbl.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i32>)
+declare <vscale x 2 x double> @llvm.aarch64.sve.tbl.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i64>)
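
For context, a minimal ACLE C++ sketch (assuming arm_sve.h; the function
name broadcast_lane1 is illustrative, not part of this patch) of the kind
of source that typically lowers to the tbl + dup.x pattern handled above:

  #include <arm_sve.h>

  // Broadcast lane 1 of 'data' to every lane. With a constant index
  // splat, the frontend typically emits @llvm.aarch64.sve.tbl with an
  // @llvm.aarch64.sve.dup.x index operand, which optimizeTBL rewrites
  // to extractelement + splat.
  svuint8_t broadcast_lane1(svuint8_t data) {
    return svtbl_u8(data, svdup_n_u8(1));
  }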


        


More information about the llvm-commits mailing list