[llvm] [LLVM][InstCombine][AArch64] sve.insr(splat(x), x) ==> splat(x) (PR #109445)

Paul Walker via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 20 09:52:10 PDT 2024


https://github.com/paulwalker-arm created https://github.com/llvm/llvm-project/pull/109445

Fixes https://github.com/llvm/llvm-project/issues/100497
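For context, sve.insr inserts a scalar into the lowest element of a vector and shifts the existing elements up by one lane. When the vector operand is already a splat of that same scalar, the shift changes nothing, so the intrinsic call can be folded to its vector operand. A minimal before/after sketch in LLVM IR (the nxv4i32 element type and the %x/%splat names are illustrative, not taken from the patch):

  ; Before: %x is inserted into a vector that is already splat(%x).
  %ins   = insertelement <vscale x 4 x i32> poison, i32 %x, i64 0
  %splat = shufflevector <vscale x 4 x i32> %ins, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %res   = call <vscale x 4 x i32> @llvm.aarch64.sve.insr.nxv4i32(<vscale x 4 x i32> %splat, i32 %x)

  ; After: the insr is redundant; InstCombine replaces all uses of %res with
  ; %splat and the call is removed.

The same reasoning covers constant splats, e.g. insr of 5 into splat(5) or of 0.0 into zeroinitializer, as exercised by the tests below.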

From 7fa242090002453bbbf227fc928033adce9e3021 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Fri, 20 Sep 2024 17:18:34 +0100
Subject: [PATCH 1/2] Add tests to show INSR transformation

---
 .../InstCombine/AArch64/sve-intrinsic-insr.ll | 77 +++++++++++++++++++
 1 file changed, 77 insertions(+)
 create mode 100644 llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-insr.ll

diff --git a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-insr.ll b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-insr.ll
new file mode 100644
index 00000000000000..f47ee4b679b87c
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-insr.ll
@@ -0,0 +1,77 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -passes=instcombine < %s | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+define <vscale x 16 x i8> @insr_val_into_splatted_val_int(i8 %a) #0 {
+; CHECK-LABEL: @insr_val_into_splatted_val_int(
+; CHECK-NEXT:    [[T0:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[A:%.*]], i64 0
+; CHECK-NEXT:    [[T1:%.*]] = shufflevector <vscale x 16 x i8> [[T0]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT:    [[T2:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.insr.nxv16i8(<vscale x 16 x i8> [[T1]], i8 [[A]])
+; CHECK-NEXT:    ret <vscale x 16 x i8> [[T2]]
+;
+  %t0 = insertelement <vscale x 16 x i8> poison, i8 %a, i64 0
+  %t1 = shufflevector <vscale x 16 x i8> %t0, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+  %t2 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.insr.nxv16i8(<vscale x 16 x i8> %t1, i8 %a)
+  ret <vscale x 16 x i8> %t2
+}
+
+define <vscale x 8 x i16> @insr_five_into_fives() #0 {
+; CHECK-LABEL: @insr_five_into_fives(
+; CHECK-NEXT:    [[T1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16> shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 5, i64 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer), i16 5)
+; CHECK-NEXT:    ret <vscale x 8 x i16> [[T1]]
+;
+  %t1 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16> splat (i16 5), i16 5)
+  ret <vscale x 8 x i16> %t1
+}
+
+define <vscale x 4 x float> @insr_val_into_splatted_val_fp(float %a) #0 {
+; CHECK-LABEL: @insr_val_into_splatted_val_fp(
+; CHECK-NEXT:    [[T0:%.*]] = insertelement <vscale x 4 x float> poison, float [[A:%.*]], i64 0
+; CHECK-NEXT:    [[T1:%.*]] = shufflevector <vscale x 4 x float> [[T0]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+; CHECK-NEXT:    [[T2:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.insr.nxv4f32(<vscale x 4 x float> [[T1]], float [[A]])
+; CHECK-NEXT:    ret <vscale x 4 x float> [[T2]]
+;
+  %t0 = insertelement <vscale x 4 x float> poison, float %a, i64 0
+  %t1 = shufflevector <vscale x 4 x float> %t0, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
+  %t2 = tail call <vscale x 4 x float> @llvm.aarch64.sve.insr.nxv4f32(<vscale x 4 x float> %t1, float %a)
+  ret <vscale x 4 x float> %t2
+}
+
+define <vscale x 2 x double> @insr_zero_into_zero() #0 {
+; CHECK-LABEL: @insr_zero_into_zero(
+; CHECK-NEXT:    [[T1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.insr.nxv2f64(<vscale x 2 x double> zeroinitializer, double 0.000000e+00)
+; CHECK-NEXT:    ret <vscale x 2 x double> [[T1]]
+;
+  %t1 = tail call <vscale x 2 x double> @llvm.aarch64.sve.insr.nxv2f64(<vscale x 2 x double> zeroinitializer, double zeroinitializer)
+  ret <vscale x 2 x double> %t1
+}
+
+define <vscale x 16 x i8> @insr_val_into_splatted_other(i8 %a, i8 %b) #0 {
+; CHECK-LABEL: @insr_val_into_splatted_other(
+; CHECK-NEXT:    [[T0:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
+; CHECK-NEXT:    [[T1:%.*]] = shufflevector <vscale x 16 x i8> [[T0]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+; CHECK-NEXT:    [[T2:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.insr.nxv16i8(<vscale x 16 x i8> [[T1]], i8 [[A:%.*]])
+; CHECK-NEXT:    ret <vscale x 16 x i8> [[T2]]
+;
+  %t0 = insertelement <vscale x 16 x i8> poison, i8 %b, i64 0
+  %t1 = shufflevector <vscale x 16 x i8> %t0, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
+  %t2 = tail call <vscale x 16 x i8> @llvm.aarch64.sve.insr.nxv16i8(<vscale x 16 x i8> %t1, i8 %a)
+  ret <vscale x 16 x i8> %t2
+}
+
+define <vscale x 8 x i16> @insr_three_into_fives() #0 {
+; CHECK-LABEL: @insr_three_into_fives(
+; CHECK-NEXT:    [[T1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16> shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 5, i64 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer), i16 3)
+; CHECK-NEXT:    ret <vscale x 8 x i16> [[T1]]
+;
+  %t1 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16> splat (i16 5), i16 3)
+  ret <vscale x 8 x i16> %t1
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.insr.nxv16i8(<vscale x 16 x i8>, i8)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16>, i16)
+declare <vscale x 4 x float> @llvm.aarch64.sve.insr.nxv4f32(<vscale x 4 x float>, float)
+declare <vscale x 2 x double> @llvm.aarch64.sve.insr.nxv2f64(<vscale x 2 x double>, double)
+
+attributes #0 = { "target-features"="+sve" }

From 58f55f9a5a49f717d4a1e55197ba0a82b01c00b1 Mon Sep 17 00:00:00 2001
From: Paul Walker <paul.walker at arm.com>
Date: Fri, 20 Sep 2024 17:19:37 +0100
Subject: [PATCH 2/2] [LLVM][InstCombine][AArch64] sve.insr(splat(x), x) ==>
 splat(x).

Fixes https://github.com/llvm/llvm-project/issues/100497
---
 .../Target/AArch64/AArch64TargetTransformInfo.cpp    | 12 ++++++++++++
 .../InstCombine/AArch64/sve-intrinsic-insr.ll        | 12 ++++--------
 2 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 11a4aa4d01e123..a7b4a176b9f7cd 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -2140,6 +2140,16 @@ static std::optional<Instruction *> instCombineSVESrshl(InstCombiner &IC,
   return IC.replaceInstUsesWith(II, LSL);
 }
 
+static std::optional<Instruction *> instCombineSVEInsr(InstCombiner &IC,
+                                                       IntrinsicInst &II) {
+  Value *Vec = II.getOperand(0);
+
+  if (getSplatValue(Vec) == II.getOperand(1))
+    return IC.replaceInstUsesWith(II, Vec);
+
+  return std::nullopt;
+}
+
 std::optional<Instruction *>
 AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
                                      IntrinsicInst &II) const {
@@ -2460,6 +2470,8 @@ AArch64TTIImpl::instCombineIntrinsic(InstCombiner &IC,
     return instCombineSVESrshl(IC, II);
   case Intrinsic::aarch64_sve_dupq_lane:
     return instCombineSVEDupqLane(IC, II);
+  case Intrinsic::aarch64_sve_insr:
+    return instCombineSVEInsr(IC, II);
   }
 
   return std::nullopt;
diff --git a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-insr.ll b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-insr.ll
index f47ee4b679b87c..e8489c5be85c41 100644
--- a/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-insr.ll
+++ b/llvm/test/Transforms/InstCombine/AArch64/sve-intrinsic-insr.ll
@@ -7,8 +7,7 @@ define <vscale x 16 x i8> @insr_val_into_splatted_val_int(i8 %a) #0 {
 ; CHECK-LABEL: @insr_val_into_splatted_val_int(
 ; CHECK-NEXT:    [[T0:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[A:%.*]], i64 0
 ; CHECK-NEXT:    [[T1:%.*]] = shufflevector <vscale x 16 x i8> [[T0]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
-; CHECK-NEXT:    [[T2:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.insr.nxv16i8(<vscale x 16 x i8> [[T1]], i8 [[A]])
-; CHECK-NEXT:    ret <vscale x 16 x i8> [[T2]]
+; CHECK-NEXT:    ret <vscale x 16 x i8> [[T1]]
 ;
   %t0 = insertelement <vscale x 16 x i8> poison, i8 %a, i64 0
   %t1 = shufflevector <vscale x 16 x i8> %t0, <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
@@ -18,8 +17,7 @@ define <vscale x 16 x i8> @insr_val_into_splatted_val_int(i8 %a) #0 {
 
 define <vscale x 8 x i16> @insr_five_into_fives() #0 {
 ; CHECK-LABEL: @insr_five_into_fives(
-; CHECK-NEXT:    [[T1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16> shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 5, i64 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer), i16 5)
-; CHECK-NEXT:    ret <vscale x 8 x i16> [[T1]]
+; CHECK-NEXT:    ret <vscale x 8 x i16> shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> poison, i16 5, i64 0), <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer)
 ;
   %t1 = tail call <vscale x 8 x i16> @llvm.aarch64.sve.insr.nxv8i16(<vscale x 8 x i16> splat (i16 5), i16 5)
   ret <vscale x 8 x i16> %t1
@@ -29,8 +27,7 @@ define <vscale x 4 x float> @insr_val_into_splatted_val_fp(float %a) #0 {
 ; CHECK-LABEL: @insr_val_into_splatted_val_fp(
 ; CHECK-NEXT:    [[T0:%.*]] = insertelement <vscale x 4 x float> poison, float [[A:%.*]], i64 0
 ; CHECK-NEXT:    [[T1:%.*]] = shufflevector <vscale x 4 x float> [[T0]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT:    [[T2:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.insr.nxv4f32(<vscale x 4 x float> [[T1]], float [[A]])
-; CHECK-NEXT:    ret <vscale x 4 x float> [[T2]]
+; CHECK-NEXT:    ret <vscale x 4 x float> [[T1]]
 ;
   %t0 = insertelement <vscale x 4 x float> poison, float %a, i64 0
   %t1 = shufflevector <vscale x 4 x float> %t0, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
@@ -40,8 +37,7 @@ define <vscale x 4 x float> @insr_val_into_splatted_val_fp(float %a) #0 {
 
 define <vscale x 2 x double> @insr_zero_into_zero() #0 {
 ; CHECK-LABEL: @insr_zero_into_zero(
-; CHECK-NEXT:    [[T1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.insr.nxv2f64(<vscale x 2 x double> zeroinitializer, double 0.000000e+00)
-; CHECK-NEXT:    ret <vscale x 2 x double> [[T1]]
+; CHECK-NEXT:    ret <vscale x 2 x double> zeroinitializer
 ;
   %t1 = tail call <vscale x 2 x double> @llvm.aarch64.sve.insr.nxv2f64(<vscale x 2 x double> zeroinitializer, double zeroinitializer)
   ret <vscale x 2 x double> %t1


