[llvm] 80e49f4 - [ConstraintElimination] Bail out for GEPs with scalable vectors.

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Mon Oct 10 08:01:44 PDT 2022


Author: Florian Hahn
Date: 2022-10-10T16:01:20+01:00
New Revision: 80e49f49e457a53eca9bc8171602bf6fde7f3ecb

URL: https://github.com/llvm/llvm-project/commit/80e49f49e457a53eca9bc8171602bf6fde7f3ecb
DIFF: https://github.com/llvm/llvm-project/commit/80e49f49e457a53eca9bc8171602bf6fde7f3ecb.diff

LOG: [ConstraintElimination] Bail out for GEPs with scalable vectors.

This fixes a crash with scalable vectors, thanks @nikic for spotting
this!

Added: 
    

Modified: 
    llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
    llvm/test/Transforms/ConstraintElimination/gep-arithmetic-different-types.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
index f66f7644403d5..9c9249b2cfe00 100644
--- a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
@@ -212,7 +212,11 @@ decompose(Value *V, SmallVector<PreconditionTy, 4> &Preconditions,
     Value *Op0, *Op1;
     ConstantInt *CI;
 
+    // Bail out for scalable vectors for now.
     auto GTI = gep_type_begin(GEP);
+    if (isa<ScalableVectorType>(GTI.getIndexedType()))
+      return {};
+
     int64_t Scale = static_cast<int64_t>(
         DL.getTypeAllocSize(GTI.getIndexedType()).getFixedSize());
     int64_t MulRes;

diff --git a/llvm/test/Transforms/ConstraintElimination/gep-arithmetic-different-types.ll b/llvm/test/Transforms/ConstraintElimination/gep-arithmetic-different-types.ll
index dc7253b454871..3cc3e03eedf17 100644
--- a/llvm/test/Transforms/ConstraintElimination/gep-arithmetic-different-types.ll
+++ b/llvm/test/Transforms/ConstraintElimination/gep-arithmetic-different-types.ll
@@ -404,3 +404,53 @@ define i1 @gep_add_nsw_positive_index_struct(ptr %A, ptr %upper, i8 %idx) {
 
   ret i1 %res.3
 }
+
+define i1 @gep_constant_positive_index_fixed_vector_ty(ptr %A, ptr %upper) {
+; CHECK-LABEL: @gep_constant_positive_index_fixed_vector_ty(
+; CHECK-NEXT:    [[ADD_I8_4:%.*]] = getelementptr inbounds <4 x i8>, ptr [[A:%.*]], i64 4
+; CHECK-NEXT:    [[C_0:%.*]] = icmp ult ptr [[ADD_I8_4]], [[UPPER:%.*]]
+; CHECK-NEXT:    call void @llvm.assume(i1 [[C_0]])
+; CHECK-NEXT:    [[ADD_I16_4:%.*]] = getelementptr inbounds <4 x i16>, ptr [[A]], i64 4
+; CHECK-NEXT:    [[C_1:%.*]] = icmp ult ptr [[ADD_I16_4]], [[UPPER]]
+; CHECK-NEXT:    [[ADD_I16_2:%.*]] = getelementptr inbounds <4 x i16>, ptr [[A]], i64 2
+; CHECK-NEXT:    [[T_1:%.*]] = icmp ult ptr [[ADD_I16_2]], [[UPPER]]
+; CHECK-NEXT:    [[RES_1:%.*]] = xor i1 [[C_1]], true
+; CHECK-NEXT:    ret i1 [[RES_1]]
+;
+  %add.i8.4 = getelementptr inbounds <4 x i8>, ptr %A, i64 4
+  %c.0 = icmp ult ptr %add.i8.4, %upper
+  call void @llvm.assume(i1 %c.0)
+
+  %add.i16.4 = getelementptr inbounds <4 x i16>, ptr %A, i64 4
+  %c.1 = icmp ult ptr %add.i16.4, %upper
+
+  %add.i16.2 = getelementptr inbounds <4 x i16>, ptr %A, i64 2
+  %t.1 = icmp ult ptr %add.i16.2, %upper
+  %res.1 = xor i1 %c.1, %t.1
+  ret i1 %res.1
+}
+
+define i1 @gep_constant_positive_index_scalable_vector_ty(ptr %A, ptr %upper) {
+; CHECK-LABEL: @gep_constant_positive_index_scalable_vector_ty(
+; CHECK-NEXT:    [[ADD_I8_4:%.*]] = getelementptr inbounds <vscale x 4 x i8>, ptr [[A:%.*]], i64 4
+; CHECK-NEXT:    [[C_0:%.*]] = icmp ult ptr [[ADD_I8_4]], [[UPPER:%.*]]
+; CHECK-NEXT:    call void @llvm.assume(i1 [[C_0]])
+; CHECK-NEXT:    [[ADD_I16_4:%.*]] = getelementptr inbounds <vscale x 4 x i16>, ptr [[A]], i64 4
+; CHECK-NEXT:    [[C_1:%.*]] = icmp ult ptr [[ADD_I16_4]], [[UPPER]]
+; CHECK-NEXT:    [[ADD_I16_2:%.*]] = getelementptr inbounds <vscale x 4 x i16>, ptr [[A]], i64 2
+; CHECK-NEXT:    [[T_1:%.*]] = icmp ult ptr [[ADD_I16_2]], [[UPPER]]
+; CHECK-NEXT:    [[RES_1:%.*]] = xor i1 [[C_1]], [[T_1]]
+; CHECK-NEXT:    ret i1 [[RES_1]]
+;
+  %add.i8.4 = getelementptr inbounds <vscale x 4 x i8>, ptr %A, i64 4
+  %c.0 = icmp ult ptr %add.i8.4, %upper
+  call void @llvm.assume(i1 %c.0)
+
+  %add.i16.4 = getelementptr inbounds <vscale x 4 x i16>, ptr %A, i64 4
+  %c.1 = icmp ult ptr %add.i16.4, %upper
+
+  %add.i16.2 = getelementptr inbounds <vscale x 4 x i16>, ptr %A, i64 2
+  %t.1 = icmp ult ptr %add.i16.2, %upper
+  %res.1 = xor i1 %c.1, %t.1
+  ret i1 %res.1
+}


        


More information about the llvm-commits mailing list