[llvm] 2ed7db0 - [InstSimplify] Remove redundant {insert,extract}_vector intrinsic chains

Philip Reames via llvm-commits <llvm-commits at lists.llvm.org>
Thu May 13 11:32:58 PDT 2021


It looks like you can probably generalize these to any constant offset. Were you planning to do that in a follow-up?
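
Just to illustrate what I mean, here is a hypothetical example (my sketch, not
something the committed patch handles; the function name and the offset of 16
are made up) of a chain the generalized fold could also remove. It reuses the
intrinsic declarations from the new extract-vector.ll test. Because the insert
and the extract agree on the same constant offset, the extracted slice is
exactly the subvector that was just inserted:

    ; (extract_vector (insert_vector V, X, 16), 16) -> X
    define <16 x i8> @redundant_chain_nonzero_offset(<vscale x 32 x i8> %v, <16 x i8> %x) {
      %inserted = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8> %v, <16 x i8> %x, i64 16)
      ; %extracted reads back exactly the slice written by the insert above,
      ; so the whole chain could fold to just returning %x.
      %extracted = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv32i8(<vscale x 32 x i8> %inserted, i64 16)
      ret <16 x i8> %extracted
    }

    declare <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8>, <16 x i8>, i64)
    declare <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv32i8(<vscale x 32 x i8>, i64)

I'd expect the check to look much like the committed one, except that the two
constant index operands would be compared for equality rather than both being
required to be zero.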

Philip

On 5/13/2021 9:10 AM, Joe Ellis via llvm-commits wrote:
> Author: Joe Ellis
> Date: 2021-05-13T16:09:50Z
> New Revision: 2ed7db0d206b6af2fffa4cb2704264b76ca61266
>
> URL: https://github.com/llvm/llvm-project/commit/2ed7db0d206b6af2fffa4cb2704264b76ca61266
> DIFF: https://github.com/llvm/llvm-project/commit/2ed7db0d206b6af2fffa4cb2704264b76ca61266.diff
>
> LOG: [InstSimplify] Remove redundant {insert,extract}_vector intrinsic chains
>
> This commit removes some redundant {insert,extract}_vector intrinsic
> chains by implementing the following patterns as instsimplifies:
>
>     (insert_vector _, (extract_vector X, 0), 0) -> X
>     (extract_vector (insert_vector _, X, 0), 0) -> X
>
> Reviewed By: peterwaller-arm
>
> Differential Revision: https://reviews.llvm.org/D101986
>
> Added:
>      llvm/test/Transforms/InstSimplify/extract-vector.ll
>      llvm/test/Transforms/InstSimplify/insert-vector.ll
>
> Modified:
>      clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
>      clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
>      llvm/lib/Analysis/InstructionSimplify.cpp
>
> Removed:
>      
>
>
> ################################################################################
> diff  --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
> index f988d54bacd4f..edc307745a2aa 100644
> --- a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
> +++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
> @@ -24,11 +24,7 @@ svint32_t sizeless_callee(svint32_t x) {
>   
>   // CHECK-LABEL: @fixed_caller(
>   // CHECK-NEXT:  entry:
> -// CHECK-NEXT:    [[X:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[X]], i64 0)
> -// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[CASTSCALABLESVE]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[CASTFIXEDSVE]], i64 0)
> -// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE1]]
> +// CHECK-NEXT:    ret <vscale x 4 x i32> [[X_COERCE:%.*]]
>   //
>   fixed_int32_t fixed_caller(fixed_int32_t x) {
>     return sizeless_callee(x);
> @@ -36,9 +32,7 @@ fixed_int32_t fixed_caller(fixed_int32_t x) {
>   
>   // CHECK-LABEL: @fixed_callee(
>   // CHECK-NEXT:  entry:
> -// CHECK-NEXT:    [[X:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[X]], i64 0)
> -// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
> +// CHECK-NEXT:    ret <vscale x 4 x i32> [[X_COERCE:%.*]]
>   //
>   fixed_int32_t fixed_callee(fixed_int32_t x) {
>     return x;
> @@ -47,12 +41,9 @@ fixed_int32_t fixed_callee(fixed_int32_t x) {
>   // CHECK-LABEL: @sizeless_caller(
>   // CHECK-NEXT:  entry:
>   // CHECK-NEXT:    [[COERCE1:%.*]] = alloca <16 x i32>, align 16
> -// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X:%.*]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[CASTFIXEDSVE]], i64 0)
> -// CHECK-NEXT:    [[CALL:%.*]] = call <vscale x 4 x i32> @fixed_callee(<vscale x 4 x i32> [[CASTSCALABLESVE]])
>   // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <16 x i32>* [[COERCE1]] to <vscale x 4 x i32>*
> -// CHECK-NEXT:    store <vscale x 4 x i32> [[CALL]], <vscale x 4 x i32>* [[TMP0]], align 16
> -// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, [[TBAA6:!tbaa !.*]]
> +// CHECK-NEXT:    store <vscale x 4 x i32> [[X:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
> +// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, !tbaa [[TBAA6:![0-9]+]]
>   // CHECK-NEXT:    [[CASTSCALABLESVE2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP1]], i64 0)
>   // CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE2]]
>   //
> @@ -66,15 +57,9 @@ svint32_t sizeless_caller(svint32_t x) {
>   
>   // CHECK-LABEL: @call_int32_ff(
>   // CHECK-NEXT:  entry:
> -// CHECK-NEXT:    [[OP1:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[OP1_COERCE:%.*]], i64 0)
> -// CHECK-NEXT:    [[OP2:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[OP2_COERCE:%.*]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP1]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP2]], i64 0)
>   // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
> -// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[CASTSCALABLESVE2]])
> -// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP1]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[CASTFIXEDSVE]], i64 0)
> -// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE3]]
> +// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32> [[OP2_COERCE:%.*]])
> +// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
>   //
>   fixed_int32_t call_int32_ff(svbool_t pg, fixed_int32_t op1, fixed_int32_t op2) {
>     return svsel(pg, op1, op2);
> @@ -82,15 +67,9 @@ fixed_int32_t call_int32_ff(svbool_t pg, fixed_int32_t op1, fixed_int32_t op2) {
>   
>   // CHECK-LABEL: @call_float64_ff(
>   // CHECK-NEXT:  entry:
> -// CHECK-NEXT:    [[OP1:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[OP1_COERCE:%.*]], i64 0)
> -// CHECK-NEXT:    [[OP2:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[OP2_COERCE:%.*]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP1]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE2:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP2]], i64 0)
>   // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
> -// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[CASTSCALABLESVE]], <vscale x 2 x double> [[CASTSCALABLESVE2]])
> -// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP1]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE3:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[CASTFIXEDSVE]], i64 0)
> -// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE3]]
> +// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double> [[OP2_COERCE:%.*]])
> +// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
>   //
>   fixed_float64_t call_float64_ff(svbool_t pg, fixed_float64_t op1, fixed_float64_t op2) {
>     return svsel(pg, op1, op2);
> @@ -106,20 +85,20 @@ fixed_float64_t call_float64_ff(svbool_t pg, fixed_float64_t op1, fixed_float64_
>   // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
>   // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
>   // CHECK-NEXT:    store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
> -// CHECK-NEXT:    [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, [[TBAA6]]
> +// CHECK-NEXT:    [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, !tbaa [[TBAA6]]
>   // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8>* [[OP2]] to <vscale x 16 x i1>*
>   // CHECK-NEXT:    store <vscale x 16 x i1> [[OP2_COERCE:%.*]], <vscale x 16 x i1>* [[TMP1]], align 16
> -// CHECK-NEXT:    [[OP22:%.*]] = load <8 x i8>, <8 x i8>* [[OP2]], align 16, [[TBAA6]]
> -// CHECK-NEXT:    store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, [[TBAA6]]
> -// CHECK-NEXT:    store <8 x i8> [[OP22]], <8 x i8>* [[OP2_ADDR]], align 16, [[TBAA6]]
> +// CHECK-NEXT:    [[OP22:%.*]] = load <8 x i8>, <8 x i8>* [[OP2]], align 16, !tbaa [[TBAA6]]
> +// CHECK-NEXT:    store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, !tbaa [[TBAA6]]
> +// CHECK-NEXT:    store <8 x i8> [[OP22]], <8 x i8>* [[OP2_ADDR]], align 16, !tbaa [[TBAA6]]
>   // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
> -// CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP2]], align 16, [[TBAA6]]
> +// CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP2]], align 16, !tbaa [[TBAA6]]
>   // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i8>* [[OP2_ADDR]] to <vscale x 16 x i1>*
> -// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP4]], align 16, [[TBAA6]]
> +// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP4]], align 16, !tbaa [[TBAA6]]
>   // CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]])
> -// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9:!tbaa !.*]]
> +// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
>   // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
> -// CHECK-NEXT:    [[TMP7:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
> +// CHECK-NEXT:    [[TMP7:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
>   // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
>   // CHECK-NEXT:    store <8 x i8> [[TMP7]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
>   // CHECK-NEXT:    [[TMP8:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
> @@ -135,13 +114,9 @@ fixed_bool_t call_bool_ff(svbool_t pg, fixed_bool_t op1, fixed_bool_t op2) {
>   
>   // CHECK-LABEL: @call_int32_fs(
>   // CHECK-NEXT:  entry:
> -// CHECK-NEXT:    [[OP1:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[OP1_COERCE:%.*]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP1]], i64 0)
>   // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
> -// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[OP2:%.*]])
> -// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP1]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[CASTFIXEDSVE]], i64 0)
> -// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE1]]
> +// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
> +// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
>   //
>   fixed_int32_t call_int32_fs(svbool_t pg, fixed_int32_t op1, svint32_t op2) {
>     return svsel(pg, op1, op2);
> @@ -149,13 +124,9 @@ fixed_int32_t call_int32_fs(svbool_t pg, fixed_int32_t op1, svint32_t op2) {
>   
>   // CHECK-LABEL: @call_float64_fs(
>   // CHECK-NEXT:  entry:
> -// CHECK-NEXT:    [[OP1:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[OP1_COERCE:%.*]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP1]], i64 0)
>   // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
> -// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[CASTSCALABLESVE]], <vscale x 2 x double> [[OP2:%.*]])
> -// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP1]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE1:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[CASTFIXEDSVE]], i64 0)
> -// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE1]]
> +// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double> [[OP2:%.*]])
> +// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
>   //
>   fixed_float64_t call_float64_fs(svbool_t pg, fixed_float64_t op1, svfloat64_t op2) {
>     return svsel(pg, op1, op2);
> @@ -169,14 +140,14 @@ fixed_float64_t call_float64_fs(svbool_t pg, fixed_float64_t op1, svfloat64_t op
>   // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
>   // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
>   // CHECK-NEXT:    store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
> -// CHECK-NEXT:    [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, [[TBAA6]]
> -// CHECK-NEXT:    store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, [[TBAA6]]
> +// CHECK-NEXT:    [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, !tbaa [[TBAA6]]
> +// CHECK-NEXT:    store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, !tbaa [[TBAA6]]
>   // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
> -// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, [[TBAA6]]
> +// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, !tbaa [[TBAA6]]
>   // CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[OP2:%.*]])
> -// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
> +// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, !tbaa [[TBAA9]]
>   // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
> -// CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
> +// CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
>   // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
>   // CHECK-NEXT:    store <8 x i8> [[TMP4]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
>   // CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
> @@ -194,9 +165,7 @@ fixed_bool_t call_bool_fs(svbool_t pg, fixed_bool_t op1, svbool_t op2) {
>   // CHECK-NEXT:  entry:
>   // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
>   // CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
> -// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP1]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[CASTFIXEDSVE]], i64 0)
> -// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
> +// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
>   //
>   fixed_int32_t call_int32_ss(svbool_t pg, svint32_t op1, svint32_t op2) {
>     return svsel(pg, op1, op2);
> @@ -206,9 +175,7 @@ fixed_int32_t call_int32_ss(svbool_t pg, svint32_t op1, svint32_t op2) {
>   // CHECK-NEXT:  entry:
>   // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
>   // CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]])
> -// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP1]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[CASTFIXEDSVE]], i64 0)
> -// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
> +// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
>   //
>   fixed_float64_t call_float64_ss(svbool_t pg, svfloat64_t op1, svfloat64_t op2) {
>     return svsel(pg, op1, op2);
> @@ -219,9 +186,9 @@ fixed_float64_t call_float64_ss(svbool_t pg, svfloat64_t op1, svfloat64_t op2) {
>   // CHECK-NEXT:    [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
>   // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
>   // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]])
> -// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
> +// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, !tbaa [[TBAA9]]
>   // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
> -// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
> +// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
>   // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
>   // CHECK-NEXT:    store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
>   // CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
>
> diff  --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
> index 448e0a82a9b45..6dc87aa37af8d 100644
> --- a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
> +++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
> @@ -13,9 +13,7 @@ typedef int32_t gnu_int32_t __attribute__((vector_size(N / 8)));
>   
>   // CHECK-LABEL: @to_svint32_t(
>   // CHECK-NEXT:  entry:
> -// CHECK-NEXT:    [[TYPE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE_COERCE:%.*]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE]], i64 0)
> -// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
> +// CHECK-NEXT:    ret <vscale x 4 x i32> [[TYPE_COERCE:%.*]]
>   //
>   svint32_t to_svint32_t(fixed_int32_t type) {
>     return type;
> @@ -23,9 +21,7 @@ svint32_t to_svint32_t(fixed_int32_t type) {
>   
>   // CHECK-LABEL: @from_svint32_t(
>   // CHECK-NEXT:  entry:
> -// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE:%.*]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[CASTFIXEDSVE]], i64 0)
> -// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
> +// CHECK-NEXT:    ret <vscale x 4 x i32> [[TYPE:%.*]]
>   //
>   fixed_int32_t from_svint32_t(svint32_t type) {
>     return type;
> @@ -33,9 +29,7 @@ fixed_int32_t from_svint32_t(svint32_t type) {
>   
>   // CHECK-LABEL: @to_svfloat64_t(
>   // CHECK-NEXT:  entry:
> -// CHECK-NEXT:    [[TYPE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TYPE_COERCE:%.*]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[TYPE]], i64 0)
> -// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
> +// CHECK-NEXT:    ret <vscale x 2 x double> [[TYPE_COERCE:%.*]]
>   //
>   svfloat64_t to_svfloat64_t(fixed_float64_t type) {
>     return type;
> @@ -43,9 +37,7 @@ svfloat64_t to_svfloat64_t(fixed_float64_t type) {
>   
>   // CHECK-LABEL: @from_svfloat64_t(
>   // CHECK-NEXT:  entry:
> -// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TYPE:%.*]], i64 0)
> -// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[CASTFIXEDSVE]], i64 0)
> -// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
> +// CHECK-NEXT:    ret <vscale x 2 x double> [[TYPE:%.*]]
>   //
>   fixed_float64_t from_svfloat64_t(svfloat64_t type) {
>     return type;
> @@ -57,10 +49,10 @@ fixed_float64_t from_svfloat64_t(svfloat64_t type) {
>   // CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <8 x i8>, align 16
>   // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[TYPE]] to <vscale x 16 x i1>*
>   // CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
> -// CHECK-NEXT:    [[TYPE1:%.*]] = load <8 x i8>, <8 x i8>* [[TYPE]], align 16, [[TBAA6:!tbaa !.*]]
> -// CHECK-NEXT:    store <8 x i8> [[TYPE1]], <8 x i8>* [[TYPE_ADDR]], align 16, [[TBAA6]]
> +// CHECK-NEXT:    [[TYPE1:%.*]] = load <8 x i8>, <8 x i8>* [[TYPE]], align 16, !tbaa [[TBAA6:![0-9]+]]
> +// CHECK-NEXT:    store <8 x i8> [[TYPE1]], <8 x i8>* [[TYPE_ADDR]], align 16, !tbaa [[TBAA6]]
>   // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8>* [[TYPE_ADDR]] to <vscale x 16 x i1>*
> -// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, [[TBAA6]]
> +// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, !tbaa [[TBAA6]]
>   // CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP2]]
>   //
>   svbool_t to_svbool_t(fixed_bool_t type) {
> @@ -71,9 +63,9 @@ svbool_t to_svbool_t(fixed_bool_t type) {
>   // CHECK-NEXT:  entry:
>   // CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
>   // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
> -// CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE:%.*]], <vscale x 16 x i1>* [[TYPE_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
> +// CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE:%.*]], <vscale x 16 x i1>* [[TYPE_ADDR]], align 16, !tbaa [[TBAA9:![0-9]+]]
>   // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[TYPE_ADDR]] to <8 x i8>*
> -// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 16, [[TBAA6]]
> +// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 16, !tbaa [[TBAA6]]
>   // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
>   // CHECK-NEXT:    store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
>   // CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
> @@ -85,7 +77,7 @@ fixed_bool_t from_svbool_t(svbool_t type) {
>   
>   // CHECK-LABEL: @to_svint32_t__from_gnu_int32_t(
>   // CHECK-NEXT:  entry:
> -// CHECK-NEXT:    [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, [[TBAA6]]
> +// CHECK-NEXT:    [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, !tbaa [[TBAA6]]
>   // CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE]], i64 0)
>   // CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
>   //
> @@ -96,7 +88,7 @@ svint32_t to_svint32_t__from_gnu_int32_t(gnu_int32_t type) {
>   // CHECK-LABEL: @from_svint32_t__to_gnu_int32_t(
>   // CHECK-NEXT:  entry:
>   // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE:%.*]], i64 0)
> -// CHECK-NEXT:    store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]]
> +// CHECK-NEXT:    store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, !tbaa [[TBAA6]]
>   // CHECK-NEXT:    ret void
>   //
>   gnu_int32_t from_svint32_t__to_gnu_int32_t(svint32_t type) {
> @@ -105,7 +97,7 @@ gnu_int32_t from_svint32_t__to_gnu_int32_t(svint32_t type) {
>   
>   // CHECK-LABEL: @to_fixed_int32_t__from_gnu_int32_t(
>   // CHECK-NEXT:  entry:
> -// CHECK-NEXT:    [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, [[TBAA6]]
> +// CHECK-NEXT:    [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, !tbaa [[TBAA6]]
>   // CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE]], i64 0)
>   // CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
>   //
> @@ -116,7 +108,7 @@ fixed_int32_t to_fixed_int32_t__from_gnu_int32_t(gnu_int32_t type) {
>   // CHECK-LABEL: @from_fixed_int32_t__to_gnu_int32_t(
>   // CHECK-NEXT:  entry:
>   // CHECK-NEXT:    [[TYPE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE_COERCE:%.*]], i64 0)
> -// CHECK-NEXT:    store <16 x i32> [[TYPE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]]
> +// CHECK-NEXT:    store <16 x i32> [[TYPE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, !tbaa [[TBAA6]]
>   // CHECK-NEXT:    ret void
>   //
>   gnu_int32_t from_fixed_int32_t__to_gnu_int32_t(fixed_int32_t type) {
>
> diff  --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
> index a916c777081de..f91c534fc8264 100644
> --- a/llvm/lib/Analysis/InstructionSimplify.cpp
> +++ b/llvm/lib/Analysis/InstructionSimplify.cpp
> @@ -5625,6 +5625,19 @@ static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
>   
>       break;
>     }
> +  case Intrinsic::experimental_vector_extract: {
> +    Type *ReturnType = F->getReturnType();
> +
> +    // (extract_vector (insert_vector _, X, 0), 0) -> X
> +    unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
> +    Value *X = nullptr;
> +    if (match(Op0, m_Intrinsic<Intrinsic::experimental_vector_insert>(
> +                       m_Value(), m_Value(X), m_Zero())) &&
> +        IdxN == 0 && X->getType() == ReturnType)
> +      return X;
> +
> +    break;
> +  }
>     default:
>       break;
>     }
> @@ -5720,6 +5733,21 @@ static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) {
>   
>       return nullptr;
>     }
> +  case Intrinsic::experimental_vector_insert: {
> +    Value *SubVec = Call->getArgOperand(1);
> +    Value *Idx = Call->getArgOperand(2);
> +    Type *ReturnType = F->getReturnType();
> +
> +    // (insert_vector _, (extract_vector X, 0), 0) -> X
> +    unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
> +    Value *X = nullptr;
> +    if (match(SubVec, m_Intrinsic<Intrinsic::experimental_vector_extract>(
> +                          m_Value(X), m_Zero())) &&
> +        IdxN == 0 && X->getType() == ReturnType)
> +      return X;
> +
> +    return nullptr;
> +  }
>     default:
>       return nullptr;
>     }
>
> diff  --git a/llvm/test/Transforms/InstSimplify/extract-vector.ll b/llvm/test/Transforms/InstSimplify/extract-vector.ll
> new file mode 100644
> index 0000000000000..8bc29545dabae
> --- /dev/null
> +++ b/llvm/test/Transforms/InstSimplify/extract-vector.ll
> @@ -0,0 +1,26 @@
> +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
> +; RUN: opt < %s -instsimplify -S | FileCheck %s
> +
> +define <16 x i8> @redundant_insert_extract_chain(<16 x i8> %x) {
> +; CHECK-LABEL: @redundant_insert_extract_chain(
> +; CHECK-NEXT:    ret <16 x i8> [[X:%.*]]
> +;
> +  %inserted = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8> undef, <16 x i8> %x, i64 0)
> +  %extracted = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv32i8(<vscale x 32 x i8> %inserted, i64 0)
> +  ret <16 x i8> %extracted
> +}
> +
> +define <8 x i8> @non_redundant_insert_extract_chain(<16 x i8> %x) {
> +; CHECK-LABEL: @non_redundant_insert_extract_chain(
> +; CHECK-NEXT:    [[INSERTED:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8> undef, <16 x i8> [[X:%.*]], i64 0)
> +; CHECK-NEXT:    [[EXTRACTED:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8(<vscale x 32 x i8> [[INSERTED]], i64 0)
> +; CHECK-NEXT:    ret <8 x i8> [[EXTRACTED]]
> +;
> +  %inserted = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8> undef, <16 x i8> %x, i64 0)
> +  %extracted = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8(<vscale x 32 x i8> %inserted, i64 0)
> +  ret <8 x i8> %extracted
> +}
> +
> +declare <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv32i8(<vscale x 32 x i8>, i64)
> +declare <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8(<vscale x 32 x i8>, i64)
> +declare <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8>, <16 x i8>, i64)
>
> diff  --git a/llvm/test/Transforms/InstSimplify/insert-vector.ll b/llvm/test/Transforms/InstSimplify/insert-vector.ll
> new file mode 100644
> index 0000000000000..99becf3aad375
> --- /dev/null
> +++ b/llvm/test/Transforms/InstSimplify/insert-vector.ll
> @@ -0,0 +1,26 @@
> +; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
> +; RUN: opt < %s -instsimplify -S | FileCheck %s
> +
> +define <vscale x 16 x i8> @redundant_extract_insert_chain(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %x) {
> +; CHECK-LABEL: @redundant_extract_insert_chain(
> +; CHECK-NEXT:    ret <vscale x 16 x i8> [[X:%.*]]
> +;
> +  %extracted = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8(<vscale x 16 x i8> %x, i64 0)
> +  %inserted = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> undef, <32 x i8> %extracted, i64 0)
> +  ret <vscale x 16 x i8> %inserted
> +}
> +
> +define <vscale x 16 x i8> @non_redundant_extract_insert_chain(<vscale x 16 x i1> %pg, <vscale x 32 x i8> %x) {
> +; CHECK-LABEL: @non_redundant_extract_insert_chain(
> +; CHECK-NEXT:    [[EXTRACTED:%.*]] = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8(<vscale x 32 x i8> [[X:%.*]], i64 0)
> +; CHECK-NEXT:    [[INSERTED:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> undef, <32 x i8> [[EXTRACTED]], i64 0)
> +; CHECK-NEXT:    ret <vscale x 16 x i8> [[INSERTED]]
> +;
> +  %extracted = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8(<vscale x 32 x i8> %x, i64 0)
> +  %inserted = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> undef, <32 x i8> %extracted, i64 0)
> +  ret <vscale x 16 x i8> %inserted
> +}
> +
> +declare <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8(<vscale x 16 x i8>, i64)
> +declare <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8(<vscale x 32 x i8>, i64)
> +declare <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8>, <32 x i8>, i64)
>
>

