[llvm] ad648c9 - [VectorCombine][NFC][test] Supplement tests of the load-insert-store sequence

Ben Shi via llvm-commits llvm-commits at lists.llvm.org
Sun Aug 13 23:26:55 PDT 2023


Author: Ben Shi
Date: 2023-08-14T14:25:36+08:00
New Revision: ad648c974e873dffd08d8babc57f1eadf774a13e

URL: https://github.com/llvm/llvm-project/commit/ad648c974e873dffd08d8babc57f1eadf774a13e
DIFF: https://github.com/llvm/llvm-project/commit/ad648c974e873dffd08d8babc57f1eadf774a13e.diff

LOG: [VectorCombine][NFC][test] Supplement tests of the load-insert-store sequence

The newly added tests are all about scalable vector types.
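
For context only (not part of this commit): the pattern these tests exercise is a full-vector load, a single-lane insertelement, and a full-vector store, which vector-combine narrows to one scalar store through a GEP when the lane index is known to be in range. A minimal fixed-length sketch, with the hypothetical function name @insert_store_sketch:

  define void @insert_store_sketch(ptr %q, i8 zeroext %s) {
  entry:
    ; load the whole vector, overwrite lane 3, store the whole vector back
    %0 = load <16 x i8>, ptr %q
    %vecins = insertelement <16 x i8> %0, i8 %s, i32 3
    store <16 x i8> %vecins, ptr %q
    ret void
  }

  ; After -passes=vector-combine this becomes a single scalar store:
  ;   %gep = getelementptr inbounds <16 x i8>, ptr %q, i32 0, i32 3
  ;   store i8 %s, ptr %gep, align 1

For scalable vectors only the minimum element count is known at compile time; the CHECK lines in the new tests below show the load/insert/store sequence left as-is in those cases.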

Reviewed By: MaskRay

Differential Revision: https://reviews.llvm.org/D157098

Added: 
    

Modified: 
    llvm/test/Transforms/VectorCombine/load-insert-store.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/VectorCombine/load-insert-store.ll b/llvm/test/Transforms/VectorCombine/load-insert-store.ll
index 7f3dc7dea15a88..8d847af8d006d2 100644
--- a/llvm/test/Transforms/VectorCombine/load-insert-store.ll
+++ b/llvm/test/Transforms/VectorCombine/load-insert-store.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -passes=vector-combine -data-layout=e < %s | FileCheck %s
-; RUN: opt -S -passes=vector-combine -data-layout=E < %s | FileCheck %s
+; RUN: opt -S -passes=vector-combine -data-layout=e < %s | FileCheck %s --check-prefixes=CHECK,LE
+; RUN: opt -S -passes=vector-combine -data-layout=E < %s | FileCheck %s --check-prefixes=CHECK,BE
 
 define void @insert_store(ptr %q, i8 zeroext %s) {
 ; CHECK-LABEL: @insert_store(
@@ -61,6 +61,23 @@ entry:
   ret void
 }
 
+; To verify the case that the index exceeds the minimum number
+; of elements of a scalable vector type.
+define void @insert_store_vscale_exceeds(ptr %q, i16 zeroext %s) {
+; CHECK-LABEL: @insert_store_vscale_exceeds(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 8 x i16>, ptr [[Q:%.*]], align 16
+; CHECK-NEXT:    [[VECINS:%.*]] = insertelement <vscale x 8 x i16> [[TMP0]], i16 [[S:%.*]], i32 9
+; CHECK-NEXT:    store <vscale x 8 x i16> [[VECINS]], ptr [[Q]], align 16
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = load <vscale x 8 x i16>, ptr %q
+  %vecins = insertelement <vscale x 8 x i16> %0, i16 %s, i32 9
+  store <vscale x 8 x i16> %vecins, ptr %q
+  ret void
+}
+
 define void @insert_store_v9i4(ptr %q, i4 zeroext %s) {
 ; CHECK-LABEL: @insert_store_v9i4(
 ; CHECK-NEXT:  entry:
@@ -125,6 +142,23 @@ entry:
   ret void
 }
 
+; To verify the case that the index is not a constant, and
+; the vector type is scalable.
+define void @insert_store_vscale_nonconst(ptr %q, i8 zeroext %s, i32 %idx) {
+; CHECK-LABEL: @insert_store_vscale_nonconst(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q:%.*]], align 16
+; CHECK-NEXT:    [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S:%.*]], i32 [[IDX:%.*]]
+; CHECK-NEXT:    store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = load <vscale x 16 x i8>, ptr %q
+  %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx
+  store <vscale x 16 x i8> %vecins, ptr %q
+  ret void
+}
+
 ; To verify align here is narrowed to scalar store size
 define void @insert_store_nonconst_large_alignment(ptr %q, i32 zeroext %s, i32 %idx) {
 ; CHECK-LABEL: @insert_store_nonconst_large_alignment(
@@ -210,6 +244,27 @@ entry:
   ret void
 }
 
+; To verify the index is not a constant but valid by assume,
+; for scalable vector types.
+define void @insert_store_vscale_nonconst_index_known_valid_by_assume(ptr %q, i8 zeroext %s, i32 %idx) {
+; CHECK-LABEL: @insert_store_vscale_nonconst_index_known_valid_by_assume(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[IDX:%.*]], 4
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q:%.*]], align 16
+; CHECK-NEXT:    [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S:%.*]], i32 [[IDX]]
+; CHECK-NEXT:    store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; CHECK-NEXT:    ret void
+;
+entry:
+  %cmp = icmp ult i32 %idx, 4
+  call void @llvm.assume(i1 %cmp)
+  %0 = load <vscale x 16 x i8>, ptr %q
+  %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx
+  store <vscale x 16 x i8> %vecins, ptr %q
+  ret void
+}
+
 declare void @maythrow() readnone
 
 define void @insert_store_nonconst_index_not_known_valid_by_assume_after_load(ptr %q, i8 zeroext %s, i32 %idx) {
@@ -252,6 +307,27 @@ entry:
   ret void
 }
 
+; To verify the index is not a constant and may not be valid by assume,
+; for scalable vector types.
+define void @insert_store_vscale_nonconst_index_not_known_valid_by_assume(ptr %q, i8 zeroext %s, i32 %idx) {
+; CHECK-LABEL: @insert_store_vscale_nonconst_index_not_known_valid_by_assume(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[CMP:%.*]] = icmp ult i32 [[IDX:%.*]], 17
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q:%.*]], align 16
+; CHECK-NEXT:    [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S:%.*]], i32 [[IDX]]
+; CHECK-NEXT:    store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; CHECK-NEXT:    ret void
+;
+entry:
+  %cmp = icmp ult i32 %idx, 17
+  call void @llvm.assume(i1 %cmp)
+  %0 = load <vscale x 16 x i8>, ptr %q
+  %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx
+  store <vscale x 16 x i8> %vecins, ptr %q
+  ret void
+}
+
 declare void @llvm.assume(i1)
 
 define void @insert_store_nonconst_index_known_noundef_and_valid_by_and(ptr %q, i8 zeroext %s, i32 noundef %idx) {
@@ -270,6 +346,25 @@ entry:
   ret void
 }
 
+; To verify the index is not a constant but valid by and,
+; for scalable vector types.
+define void @insert_store_vscale_nonconst_index_known_noundef_and_valid_by_and(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; CHECK-LABEL: @insert_store_vscale_nonconst_index_known_noundef_and_valid_by_and(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q:%.*]], align 16
+; CHECK-NEXT:    [[IDX_CLAMPED:%.*]] = and i32 [[IDX:%.*]], 7
+; CHECK-NEXT:    [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S:%.*]], i32 [[IDX_CLAMPED]]
+; CHECK-NEXT:    store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = load <vscale x 16 x i8>, ptr %q
+  %idx.clamped = and i32 %idx, 7
+  %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx.clamped
+  store <vscale x 16 x i8> %vecins, ptr %q
+  ret void
+}
+
 define void @insert_store_nonconst_index_base_frozen_and_valid_by_and(ptr %q, i8 zeroext %s, i32 %idx) {
 ; CHECK-LABEL: @insert_store_nonconst_index_base_frozen_and_valid_by_and(
 ; CHECK-NEXT:  entry:
@@ -310,10 +405,10 @@ entry:
 define void @insert_store_nonconst_index_known_valid_by_and_but_may_be_poison(ptr %q, i8 zeroext %s, i32 %idx) {
 ; CHECK-LABEL: @insert_store_nonconst_index_known_valid_by_and_but_may_be_poison(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = freeze i32 [[IDX:%.*]]
-; CHECK-NEXT:    [[IDX_CLAMPED:%.*]] = and i32 [[TMP0]], 7
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds <16 x i8>, ptr [[Q:%.*]], i32 0, i32 [[IDX_CLAMPED]]
-; CHECK-NEXT:    store i8 [[S:%.*]], ptr [[TMP1]], align 1
+; CHECK-NEXT:    [[IDX_FROZEN:%.*]] = freeze i32 [[IDX:%.*]]
+; CHECK-NEXT:    [[IDX_CLAMPED:%.*]] = and i32 [[IDX_FROZEN]], 7
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds <16 x i8>, ptr [[Q:%.*]], i32 0, i32 [[IDX_CLAMPED]]
+; CHECK-NEXT:    store i8 [[S:%.*]], ptr [[TMP0]], align 1
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -357,6 +452,26 @@ entry:
   store <16 x i8> %vecins, ptr %q
   ret void
 }
+
+; To verify the index is not a constant and may not be valid by and,
+; for scalable vector types.
+define void @insert_store_vscale_nonconst_index_not_known_valid_by_and(ptr %q, i8 zeroext %s, i32 %idx) {
+; CHECK-LABEL: @insert_store_vscale_nonconst_index_not_known_valid_by_and(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q:%.*]], align 16
+; CHECK-NEXT:    [[IDX_CLAMPED:%.*]] = and i32 [[IDX:%.*]], 31
+; CHECK-NEXT:    [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S:%.*]], i32 [[IDX_CLAMPED]]
+; CHECK-NEXT:    store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = load <vscale x 16 x i8>, ptr %q
+  %idx.clamped = and i32 %idx, 31
+  %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx.clamped
+  store <vscale x 16 x i8> %vecins, ptr %q
+  ret void
+}
+
 define void @insert_store_nonconst_index_known_noundef_and_valid_by_urem(ptr %q, i8 zeroext %s, i32 noundef %idx) {
 ; CHECK-LABEL: @insert_store_nonconst_index_known_noundef_and_valid_by_urem(
 ; CHECK-NEXT:  entry:
@@ -373,6 +488,25 @@ entry:
   ret void
 }
 
+; To verify the index is not a constant but valid by urem,
+; for scalable vector types.
+define void @insert_store_vscale_nonconst_index_known_noundef_and_valid_by_urem(ptr %q, i8 zeroext %s, i32 noundef %idx) {
+; CHECK-LABEL: @insert_store_vscale_nonconst_index_known_noundef_and_valid_by_urem(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q:%.*]], align 16
+; CHECK-NEXT:    [[IDX_CLAMPED:%.*]] = urem i32 [[IDX:%.*]], 16
+; CHECK-NEXT:    [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S:%.*]], i32 [[IDX_CLAMPED]]
+; CHECK-NEXT:    store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = load <vscale x 16 x i8>, ptr %q
+  %idx.clamped = urem i32 %idx, 16
+  %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx.clamped
+  store <vscale x 16 x i8> %vecins, ptr %q
+  ret void
+}
+
 define void @insert_store_nonconst_index_base_frozen_and_valid_by_urem(ptr %q, i8 zeroext %s, i32 %idx) {
 ; CHECK-LABEL: @insert_store_nonconst_index_base_frozen_and_valid_by_urem(
 ; CHECK-NEXT:  entry:
@@ -413,10 +547,10 @@ entry:
 define void @insert_store_nonconst_index_known_valid_by_urem_but_may_be_poison(ptr %q, i8 zeroext %s, i32 %idx) {
 ; CHECK-LABEL: @insert_store_nonconst_index_known_valid_by_urem_but_may_be_poison(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = freeze i32 [[IDX:%.*]]
-; CHECK-NEXT:    [[IDX_CLAMPED:%.*]] = urem i32 [[TMP0]], 16
-; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds <16 x i8>, ptr [[Q:%.*]], i32 0, i32 [[IDX_CLAMPED]]
-; CHECK-NEXT:    store i8 [[S:%.*]], ptr [[TMP1]], align 1
+; CHECK-NEXT:    [[IDX_FROZEN:%.*]] = freeze i32 [[IDX:%.*]]
+; CHECK-NEXT:    [[IDX_CLAMPED:%.*]] = urem i32 [[IDX_FROZEN]], 16
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds <16 x i8>, ptr [[Q:%.*]], i32 0, i32 [[IDX_CLAMPED]]
+; CHECK-NEXT:    store i8 [[S:%.*]], ptr [[TMP0]], align 1
 ; CHECK-NEXT:    ret void
 ;
 entry:
@@ -444,6 +578,25 @@ entry:
   ret void
 }
 
+; To verify the index is not a constant and may not be valid by urem,
+; for scalable vector types.
+define void @insert_store_vscale_nonconst_index_not_known_valid_by_urem(ptr %q, i8 zeroext %s, i32 %idx) {
+; CHECK-LABEL: @insert_store_vscale_nonconst_index_not_known_valid_by_urem(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load <vscale x 16 x i8>, ptr [[Q:%.*]], align 16
+; CHECK-NEXT:    [[IDX_CLAMPED:%.*]] = urem i32 [[IDX:%.*]], 17
+; CHECK-NEXT:    [[VECINS:%.*]] = insertelement <vscale x 16 x i8> [[TMP0]], i8 [[S:%.*]], i32 [[IDX_CLAMPED]]
+; CHECK-NEXT:    store <vscale x 16 x i8> [[VECINS]], ptr [[Q]], align 16
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = load <vscale x 16 x i8>, ptr %q
+  %idx.clamped = urem i32 %idx, 17
+  %vecins = insertelement <vscale x 16 x i8> %0, i8 %s, i32 %idx.clamped
+  store <vscale x 16 x i8> %vecins, ptr %q
+  ret void
+}
+
 define void @insert_store_nonconst_index_known_noundef_not_known_valid_by_urem(ptr %q, i8 zeroext %s, i32 noundef %idx) {
 ; CHECK-LABEL: @insert_store_nonconst_index_known_noundef_not_known_valid_by_urem(
 ; CHECK-NEXT:  entry:
@@ -665,3 +818,6 @@ bb:
 
 declare i32 @bar(i32, i1) readonly
 declare double @llvm.log2.f64(double)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; BE: {{.*}}
+; LE: {{.*}}

More information about the llvm-commits mailing list