[llvm] a4b8979 - [SLP] Add additional tests which caused crashes with versioning.
From: Florian Hahn via llvm-commits <llvm-commits at lists.llvm.org>
Date: Thu Oct 21 10:17:53 PDT 2021
Author: Florian Hahn
Date: 2021-10-21T18:17:31+01:00
New Revision: a4b8979a81afe45463860579cd82ee7bb45d1513
URL: https://github.com/llvm/llvm-project/commit/a4b8979a81afe45463860579cd82ee7bb45d1513
DIFF: https://github.com/llvm/llvm-project/commit/a4b8979a81afe45463860579cd82ee7bb45d1513.diff
LOG: [SLP] Add additional tests which caused crashes with versioning.
Added:
Modified:
llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks-in-loops.ll
llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll
llvm/test/Transforms/SLPVectorizer/X86/memory-runtime-checks.ll
Removed:
################################################################################
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks-in-loops.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks-in-loops.ll
index 6beb1103ceaac..7770be2071c5a 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks-in-loops.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks-in-loops.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -slp-vectorizer -mtriple=arm64-apple-ios -S %s | FileCheck %s
+; RUN: opt -aa-pipeline='basic-aa,scoped-noalias-aa' -passes=slp-vectorizer -mtriple=arm64-apple-darwin -S %s | FileCheck %s
define void @loop1(i32* %A, i32* %B, i64 %N) {
; CHECK-LABEL: @loop1(
@@ -174,3 +175,95 @@ loop:
exit:
ret void
}
+
+; Similar to @loop1, but a load is used in a phi in the same basic block.
+define i32 @value_used_in_phi(i32* %A, i32* %B, i64 %N) {
+; CHECK-LABEL: @value_used_in_phi(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[LOOP:%.*]]
+; CHECK: loop:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[P:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[A_3:%.*]], [[LOOP]] ]
+; CHECK-NEXT: [[B_GEP_0:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[IV]]
+; CHECK-NEXT: [[B_0:%.*]] = load i32, i32* [[B_GEP_0]], align 4
+; CHECK-NEXT: [[A_GEP_0:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[IV]]
+; CHECK-NEXT: [[A_0:%.*]] = load i32, i32* [[A_GEP_0]], align 4
+; CHECK-NEXT: [[ADD_0:%.*]] = add i32 [[A_0]], 20
+; CHECK-NEXT: [[XOR_0:%.*]] = xor i32 [[ADD_0]], [[B_0]]
+; CHECK-NEXT: store i32 [[XOR_0]], i32* [[A_GEP_0]], align 4
+; CHECK-NEXT: [[IV_1:%.*]] = or i64 [[IV]], 1
+; CHECK-NEXT: [[B_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[IV_1]]
+; CHECK-NEXT: [[B_1:%.*]] = load i32, i32* [[B_GEP_1]], align 4
+; CHECK-NEXT: [[A_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[IV_1]]
+; CHECK-NEXT: [[A_1:%.*]] = load i32, i32* [[A_GEP_1]], align 4
+; CHECK-NEXT: [[ADD_1:%.*]] = add i32 [[A_1]], 20
+; CHECK-NEXT: [[XOR_1:%.*]] = xor i32 [[ADD_1]], [[B_1]]
+; CHECK-NEXT: store i32 [[XOR_1]], i32* [[A_GEP_1]], align 4
+; CHECK-NEXT: [[IV_2:%.*]] = or i64 [[IV]], 2
+; CHECK-NEXT: [[B_GEP_2:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[IV_2]]
+; CHECK-NEXT: [[B_2:%.*]] = load i32, i32* [[B_GEP_2]], align 4
+; CHECK-NEXT: [[A_GEP_2:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[IV_2]]
+; CHECK-NEXT: [[A_2:%.*]] = load i32, i32* [[A_GEP_2]], align 4
+; CHECK-NEXT: [[ADD_2:%.*]] = add i32 [[A_2]], 20
+; CHECK-NEXT: [[XOR_2:%.*]] = xor i32 [[ADD_2]], [[B_2]]
+; CHECK-NEXT: store i32 [[XOR_2]], i32* [[A_GEP_2]], align 4
+; CHECK-NEXT: [[IV_3:%.*]] = or i64 [[IV]], 3
+; CHECK-NEXT: [[B_GEP_3:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[IV_3]]
+; CHECK-NEXT: [[B_3:%.*]] = load i32, i32* [[B_GEP_3]], align 4
+; CHECK-NEXT: [[A_GEP_3:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[IV_3]]
+; CHECK-NEXT: [[A_3]] = load i32, i32* [[A_GEP_3]], align 4
+; CHECK-NEXT: [[ADD_3:%.*]] = add i32 [[A_3]], 20
+; CHECK-NEXT: [[XOR_3:%.*]] = xor i32 [[ADD_3]], [[B_3]]
+; CHECK-NEXT: store i32 [[XOR_3]], i32* [[A_GEP_3]], align 4
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 16
+; CHECK-NEXT: [[COND:%.*]] = icmp ult i64 [[IV_NEXT]], [[N:%.*]]
+; CHECK-NEXT: br i1 [[COND]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK: exit:
+; CHECK-NEXT: ret i32 [[P]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %p = phi i32 [ 0, %entry ], [ %A.3, %loop ]
+ %B.gep.0 = getelementptr inbounds i32, i32* %B, i64 %iv
+ %B.0 = load i32, i32* %B.gep.0, align 4
+ %A.gep.0 = getelementptr inbounds i32, i32* %A, i64 %iv
+ %A.0 = load i32, i32* %A.gep.0, align 4
+ %add.0 = add i32 %A.0, 20
+ %xor.0 = xor i32 %add.0, %B.0
+ store i32 %xor.0, i32* %A.gep.0, align 4
+ %iv.1 = or i64 %iv, 1
+ %B.gep.1 = getelementptr inbounds i32, i32* %B, i64 %iv.1
+ %B.1 = load i32, i32* %B.gep.1, align 4
+ %A.gep.1 = getelementptr inbounds i32, i32* %A, i64 %iv.1
+ %A.1 = load i32, i32* %A.gep.1, align 4
+ %add.1 = add i32 %A.1, 20
+ %xor.1 = xor i32 %add.1, %B.1
+ store i32 %xor.1, i32* %A.gep.1, align 4
+ %iv.2 = or i64 %iv, 2
+ %B.gep.2 = getelementptr inbounds i32, i32* %B, i64 %iv.2
+ %B.2 = load i32, i32* %B.gep.2, align 4
+ %A.gep.2 = getelementptr inbounds i32, i32* %A, i64 %iv.2
+ %A.2 = load i32, i32* %A.gep.2, align 4
+ %add.2 = add i32 %A.2, 20
+ %xor.2 = xor i32 %add.2, %B.2
+ store i32 %xor.2, i32* %A.gep.2, align 4
+ %iv.3 = or i64 %iv, 3
+ %B.gep.3 = getelementptr inbounds i32, i32* %B, i64 %iv.3
+ %B.3 = load i32, i32* %B.gep.3, align 4
+ %A.gep.3 = getelementptr inbounds i32, i32* %A, i64 %iv.3
+ %A.3 = load i32, i32* %A.gep.3, align 4
+ %add.3 = add i32 %A.3, 20
+ %xor.3 = xor i32 %add.3, %B.3
+ store i32 %xor.3, i32* %A.gep.3, align 4
+ %iv.next = add nuw nsw i64 %iv, 16
+ %cond = icmp ult i64 %iv.next, %N
+ br i1 %cond, label %loop, label %exit
+
+exit:
+ ret i32 %p
+}
+
+
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll
index 5d77d5c2a0094..d6bb9bda4bba8 100644
--- a/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/memory-runtime-checks.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -scoped-noalias-aa -slp-vectorizer -mtriple=arm64-apple-darwin -enable-new-pm=false -S %s | FileCheck %s
+; RUN: opt -aa-pipeline='basic-aa,scoped-noalias-aa' -passes=slp-vectorizer -mtriple=arm64-apple-darwin -S %s | FileCheck %s
define void @needs_versioning_not_profitable(i32* %dst, i32* %src) {
; CHECK-LABEL: @needs_versioning_not_profitable(
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/memory-runtime-checks.ll b/llvm/test/Transforms/SLPVectorizer/X86/memory-runtime-checks.ll
index 7a9d0e8f35e5a..f8b29afb8e571 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/memory-runtime-checks.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/memory-runtime-checks.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -scoped-noalias-aa -slp-vectorizer -mtriple=x86_64-apple-darwin -enable-new-pm=false -S %s | FileCheck %s
+; RUN: opt -aa-pipeline='basic-aa,scoped-noalias-aa' -passes=slp-vectorizer -mtriple=x86_64-apple-darwin -S %s | FileCheck %s
define void @version_multiple(i32* nocapture %out_block, i32* nocapture readonly %counter) {
; CHECK-LABEL: @version_multiple(
@@ -185,4 +186,158 @@ bb14: ; preds = %bb13
ret double undef
}
+define void @gather_sequence_crash(<2 x float> %arg, float* %arg1, float %arg2, float* %arg3, float* %arg4, float* %arg5, i1 %c.1, i1 %c.2) {
+; CHECK-LABEL: @gather_sequence_crash(
+; CHECK-NEXT: bb:
+; CHECK-NEXT: br i1 [[C_1:%.*]], label [[BB16:%.*]], label [[BB6:%.*]]
+; CHECK: bb6:
+; CHECK-NEXT: [[TMP:%.*]] = getelementptr inbounds float, float* [[ARG1:%.*]], i32 4
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds float, float* [[ARG1]], i32 5
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds float, float* [[ARG1]], i32 3
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds float, float* [[ARG1]], i32 6
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <4 x float> <float poison, float poison, float poison, float 0.000000e+00>, float [[ARG2:%.*]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <2 x float> [[ARG:%.*]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x float> [[TMP0]], <4 x float> [[TMP1]], <4 x i32> <i32 0, i32 4, i32 5, i32 3>
+; CHECK-NEXT: [[TMP3:%.*]] = fmul <4 x float> [[TMP2]], zeroinitializer
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast float* [[TMP8]] to <4 x float>*
+; CHECK-NEXT: store <4 x float> [[TMP3]], <4 x float>* [[TMP4]], align 4
+; CHECK-NEXT: ret void
+; CHECK: bb16:
+; CHECK-NEXT: br label [[BB17:%.*]]
+; CHECK: bb17:
+; CHECK-NEXT: br label [[BB18:%.*]]
+; CHECK: bb18:
+; CHECK-NEXT: br label [[BB19:%.*]]
+; CHECK: bb19:
+; CHECK-NEXT: br label [[BB20:%.*]]
+; CHECK: bb20:
+; CHECK-NEXT: br label [[BB21:%.*]]
+; CHECK: bb21:
+; CHECK-NEXT: br label [[BB22:%.*]]
+; CHECK: bb22:
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds float, float* [[ARG4:%.*]], i32 0
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr float, float* [[TMP23]], i64 7
+; CHECK-NEXT: br i1 [[C_2:%.*]], label [[BB25:%.*]], label [[BB22]]
+; CHECK: bb25:
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr float, float* [[TMP23]], i64 6
+; CHECK-NEXT: store float 0.000000e+00, float* [[TMP24]], align 4
+; CHECK-NEXT: [[TMP27:%.*]] = load float, float* [[ARG5:%.*]], align 4
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr float, float* [[TMP23]], i64 5
+; CHECK-NEXT: [[TMP29:%.*]] = fadd float 0.000000e+00, 0.000000e+00
+; CHECK-NEXT: store float 0.000000e+00, float* [[TMP26]], align 4
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr float, float* [[TMP23]], i64 4
+; CHECK-NEXT: store float 0.000000e+00, float* [[TMP28]], align 4
+; CHECK-NEXT: [[TMP31:%.*]] = fadd float 0.000000e+00, 0.000000e+00
+; CHECK-NEXT: store float 0.000000e+00, float* [[TMP30]], align 4
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds float, float* [[ARG4]], i32 0
+; CHECK-NEXT: br label [[BB33:%.*]]
+; CHECK: bb33:
+; CHECK-NEXT: br label [[BB34:%.*]]
+; CHECK: bb34:
+; CHECK-NEXT: [[TMP35:%.*]] = getelementptr float, float* [[TMP32]], i64 3
+; CHECK-NEXT: [[TMP36:%.*]] = getelementptr float, float* [[TMP32]], i64 2
+; CHECK-NEXT: [[TMP37:%.*]] = load float, float* [[TMP35]], align 4
+; CHECK-NEXT: [[TMP38:%.*]] = fadd float 0.000000e+00, [[TMP37]]
+; CHECK-NEXT: store float [[TMP38]], float* [[TMP35]], align 4
+; CHECK-NEXT: [[TMP39:%.*]] = getelementptr float, float* [[TMP32]], i64 1
+; CHECK-NEXT: [[TMP40:%.*]] = load float, float* [[TMP36]], align 4
+; CHECK-NEXT: [[TMP41:%.*]] = fadd float 0.000000e+00, [[TMP40]]
+; CHECK-NEXT: store float [[TMP41]], float* [[TMP36]], align 4
+; CHECK-NEXT: [[TMP42:%.*]] = load float, float* [[TMP39]], align 4
+; CHECK-NEXT: [[TMP43:%.*]] = fadd float 0.000000e+00, [[TMP42]]
+; CHECK-NEXT: store float [[TMP43]], float* [[TMP39]], align 4
+; CHECK-NEXT: [[TMP44:%.*]] = load float, float* [[ARG3:%.*]], align 4
+; CHECK-NEXT: [[TMP45:%.*]] = load float, float* [[TMP32]], align 4
+; CHECK-NEXT: [[TMP46:%.*]] = fadd float 0.000000e+00, [[TMP45]]
+; CHECK-NEXT: store float [[TMP46]], float* [[TMP32]], align 4
+; CHECK-NEXT: call void @quux()
+; CHECK-NEXT: br label [[BB47:%.*]]
+; CHECK: bb47:
+; CHECK-NEXT: br label [[BB17]]
+;
+bb:
+ br i1 %c.1, label %bb16, label %bb6
+
+bb6: ; preds = %bb
+ %tmp = getelementptr inbounds float, float* %arg1, i32 4
+ %tmp7 = getelementptr inbounds float, float* %arg1, i32 5
+ %tmp8 = getelementptr inbounds float, float* %arg1, i32 3
+ %tmp9 = getelementptr inbounds float, float* %arg1, i32 6
+ %tmp10 = extractelement <2 x float> %arg, i32 0
+ %tmp11 = fmul float %tmp10, 0.000000e+00
+ store float %tmp11, float* %tmp, align 4
+ %tmp12 = extractelement <2 x float> %arg, i32 1
+ %tmp13 = fmul float %tmp12, 0.000000e+00
+ store float %tmp13, float* %tmp7, align 4
+ %tmp14 = fmul float %arg2, 0.000000e+00
+ store float %tmp14, float* %tmp8, align 4
+ %tmp15 = fmul float 0.000000e+00, 0.000000e+00
+ store float %tmp15, float* %tmp9, align 4
+ ret void
+
+bb16: ; preds = %bb
+ br label %bb17
+
+bb17: ; preds = %bb47, %bb16
+ br label %bb18
+
+bb18: ; preds = %bb17
+ br label %bb19
+
+bb19: ; preds = %bb18
+ br label %bb20
+
+bb20: ; preds = %bb19
+ br label %bb21
+
+bb21: ; preds = %bb20
+ br label %bb22
+
+bb22: ; preds = %bb22, %bb21
+ %tmp23 = getelementptr inbounds float, float* %arg4, i32 0
+ %tmp24 = getelementptr float, float* %tmp23, i64 7
+ br i1 %c.2, label %bb25, label %bb22
+
+bb25: ; preds = %bb22
+ %tmp26 = getelementptr float, float* %tmp23, i64 6
+ store float 0.000000e+00, float* %tmp24, align 4
+ %tmp27 = load float, float* %arg5, align 4
+ %tmp28 = getelementptr float, float* %tmp23, i64 5
+ %tmp29 = fadd float 0.000000e+00, 0.000000e+00
+ store float 0.000000e+00, float* %tmp26, align 4
+ %tmp30 = getelementptr float, float* %tmp23, i64 4
+ store float 0.000000e+00, float* %tmp28, align 4
+ %tmp31 = fadd float 0.000000e+00, 0.000000e+00
+ store float 0.000000e+00, float* %tmp30, align 4
+ %tmp32 = getelementptr inbounds float, float* %arg4, i32 0
+ br label %bb33
+
+bb33: ; preds = %bb25
+ br label %bb34
+
+bb34: ; preds = %bb33
+ %tmp35 = getelementptr float, float* %tmp32, i64 3
+ %tmp36 = getelementptr float, float* %tmp32, i64 2
+ %tmp37 = load float, float* %tmp35, align 4
+ %tmp38 = fadd float 0.000000e+00, %tmp37
+ store float %tmp38, float* %tmp35, align 4
+ %tmp39 = getelementptr float, float* %tmp32, i64 1
+ %tmp40 = load float, float* %tmp36, align 4
+ %tmp41 = fadd float 0.000000e+00, %tmp40
+ store float %tmp41, float* %tmp36, align 4
+ %tmp42 = load float, float* %tmp39, align 4
+ %tmp43 = fadd float 0.000000e+00, %tmp42
+ store float %tmp43, float* %tmp39, align 4
+ %tmp44 = load float, float* %arg3, align 4
+ %tmp45 = load float, float* %tmp32, align 4
+ %tmp46 = fadd float 0.000000e+00, %tmp45
+ store float %tmp46, float* %tmp32, align 4
+ call void @quux()
+ br label %bb47
+
+bb47: ; preds = %bb34
+ br label %bb17
+}
+
+declare void @quux()
attributes #0 = { "target-features"="+avx2" }