[llvm] 84f4b1e - Reland "[LoopVectorize] Support vectorization of overflow intrinsics" (#180526)

Benjamin Maxwell via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 9 07:33:16 PST 2026


Author: Vishruth Thimmaiah
Date: 2026-02-09T15:32:04Z
New Revision: 84f4b1e52df736d4bd0bcf821c7fbe06f3534042

URL: https://github.com/llvm/llvm-project/commit/84f4b1e52df736d4bd0bcf821c7fbe06f3534042
DIFF: https://github.com/llvm/llvm-project/commit/84f4b1e52df736d4bd0bcf821c7fbe06f3534042.diff

LOG: Reland "[LoopVectorize] Support vectorization of overflow intrinsics" (#180526)

Marks the overflow intrinsics `uadd.with.overflow`, `sadd.with.overflow`,
`usub.with.overflow`, `ssub.with.overflow`, `umul.with.overflow` and
`smul.with.overflow` as trivially vectorizable, allowing the loop vectorizer
to widen calls to them.

Fixes #174617

---

This patch is a reland of #174835.

Reverts #179819
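
Concretely, a scalar struct-returning overflow call in a loop body can now be
widened into a single vector call whose struct fields are vectorized
element-wise. A minimal sketch (VF of 2, matching the generic tests below;
value names are illustrative):

    ; scalar form in the original loop body
    %call = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)

    ; widened form produced by the vectorizer
    %wide = call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> %va, <2 x i32> %vb)
    %res = extractvalue { <2 x i32>, <2 x i1> } %wide, 0
    %ovf = extractvalue { <2 x i32>, <2 x i1> } %wide, 1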

Added: 
    

Modified: 
    llvm/lib/Analysis/VectorUtils.cpp
    llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
    llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll
    llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll
    llvm/test/Transforms/LoopVectorize/struct-return.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/VectorUtils.cpp b/llvm/lib/Analysis/VectorUtils.cpp
index 79723c9815445..d4083c49626fe 100644
--- a/llvm/lib/Analysis/VectorUtils.cpp
+++ b/llvm/lib/Analysis/VectorUtils.cpp
@@ -65,6 +65,12 @@ bool llvm::isTriviallyVectorizable(Intrinsic::ID ID) {
   case Intrinsic::smul_fix_sat:
   case Intrinsic::umul_fix:
   case Intrinsic::umul_fix_sat:
+  case Intrinsic::uadd_with_overflow:
+  case Intrinsic::sadd_with_overflow:
+  case Intrinsic::usub_with_overflow:
+  case Intrinsic::ssub_with_overflow:
+  case Intrinsic::umul_with_overflow:
+  case Intrinsic::smul_with_overflow:
   case Intrinsic::sqrt: // Begin floating-point.
   case Intrinsic::asin:
   case Intrinsic::acos:
@@ -130,15 +136,6 @@ bool llvm::isTriviallyScalarizable(Intrinsic::ID ID,
   if (TTI && Intrinsic::isTargetIntrinsic(ID))
     return TTI->isTargetIntrinsicTriviallyScalarizable(ID);
 
-  switch (ID) {
-  case Intrinsic::uadd_with_overflow:
-  case Intrinsic::sadd_with_overflow:
-  case Intrinsic::ssub_with_overflow:
-  case Intrinsic::usub_with_overflow:
-  case Intrinsic::umul_with_overflow:
-  case Intrinsic::smul_with_overflow:
-    return true;
-  }
   return false;
 }
 

diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index f6bafae3e2acb..88cd5129b51ab 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -1849,7 +1849,12 @@ void VPWidenIntrinsicRecipe::execute(VPTransformState &State) {
   if (isVectorIntrinsicWithOverloadTypeAtArg(VectorIntrinsicID, -1,
                                              State.TTI)) {
     Type *RetTy = toVectorizedTy(getResultType(), State.VF);
-    append_range(TysForDecl, getContainedTypes(RetTy));
+    ArrayRef<Type *> ContainedTys = getContainedTypes(RetTy);
+    for (auto [Idx, Ty] : enumerate(ContainedTys)) {
+      if (isVectorIntrinsicWithStructReturnOverloadAtField(VectorIntrinsicID,
+                                                           Idx, State.TTI))
+        TysForDecl.push_back(Ty);
+    }
   }
   SmallVector<Value *, 4> Args;
   for (const auto &I : enumerate(operands())) {
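
For these overflow intrinsics, only the value field of the { T, i1 } result
participates in the intrinsic's name mangling; the i1 vector is derived from
it. The guard added above therefore appends just the overloaded fields to
TysForDecl, so the widened declaration comes out as (a sketch at VF 2; note
the suffix names only the first field):

    declare { <2 x i32>, <2 x i1> } @llvm.sadd.with.overflow.v2i32(<2 x i32>, <2 x i32>)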

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll
index 66247a4f8100e..55994ad9a98f8 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/multiple-result-intrinsics.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "(:|sincos|modf|extractvalue|store)" --version 5
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "(:|sincos|modf|extractvalue|store|with\.overflow)" --version 5
 ; RUN: opt -passes=loop-vectorize -mtriple=aarch64-gnu-linux -mcpu=neoverse-v1 -mattr=+sve < %s -S -o - -debug-only=loop-vectorize 2>%t.1 | FileCheck %s --check-prefix=CHECK
 ; RUN: opt -passes=loop-vectorize -mtriple=aarch64-gnu-linux -mcpu=neoverse-v1 -mattr=+sve -vector-library=ArmPL < %s -S -o - -debug-only=loop-vectorize 2>%t.2 | FileCheck %s --check-prefix=CHECK-ARMPL
 ; RUN: FileCheck --input-file=%t.1 --check-prefix=CHECK-COST %s
@@ -526,3 +526,75 @@ for.body:
 exit:
   ret void
 }
+
+; CHECK-COST-LABEL: sadd_with_overflow_i32
+; CHECK-COST: LV: Found an estimated cost of 1 for VF 1 For instruction:   %call = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %val_a, i32 %val_b)
+; CHECK-COST: Cost of 4 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.sadd.with.overflow(ir<%val_a>, ir<%val_b>)
+; CHECK-COST: Cost of 4 for VF 4: WIDEN-INTRINSIC ir<%call> = call llvm.sadd.with.overflow(ir<%val_a>, ir<%val_b>)
+; CHECK-COST: Cost of 7 for VF 8: WIDEN-INTRINSIC ir<%call> = call llvm.sadd.with.overflow(ir<%val_a>, ir<%val_b>)
+; CHECK-COST: Cost of 13 for VF 16: WIDEN-INTRINSIC ir<%call> = call llvm.sadd.with.overflow(ir<%val_a>, ir<%val_b>)
+; CHECK-COST: Cost of Invalid for VF vscale x 1: REPLICATE ir<%call> = call @llvm.sadd.with.overflow.i32(ir<%val_a>, ir<%val_b>)
+; CHECK-COST: Cost of 4 for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.sadd.with.overflow(ir<%val_a>, ir<%val_b>)
+; CHECK-COST: Cost of 4 for VF vscale x 4: WIDEN-INTRINSIC ir<%call> = call llvm.sadd.with.overflow(ir<%val_a>, ir<%val_b>)
+
+; CHECK-COST-ARMPL-LABEL: sadd_with_overflow_i32
+; CHECK-COST-ARMPL: LV: Found an estimated cost of 1 for VF 1 For instruction:   %call = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %val_a, i32 %val_b)
+; CHECK-COST-ARMPL: Cost of 4 for VF 2: WIDEN-INTRINSIC ir<%call> = call llvm.sadd.with.overflow(ir<%val_a>, ir<%val_b>)
+; CHECK-COST-ARMPL: Cost of 4 for VF 4: WIDEN-INTRINSIC ir<%call> = call llvm.sadd.with.overflow(ir<%val_a>, ir<%val_b>)
+; CHECK-COST-ARMPL: Cost of 7 for VF 8: WIDEN-INTRINSIC ir<%call> = call llvm.sadd.with.overflow(ir<%val_a>, ir<%val_b>)
+; CHECK-COST-ARMPL: Cost of 13 for VF 16: WIDEN-INTRINSIC ir<%call> = call llvm.sadd.with.overflow(ir<%val_a>, ir<%val_b>)
+; CHECK-COST-ARMPL: Cost of Invalid for VF vscale x 1: REPLICATE ir<%call> = call @llvm.sadd.with.overflow.i32(ir<%val_a>, ir<%val_b>)
+; CHECK-COST-ARMPL: Cost of 4 for VF vscale x 2: WIDEN-INTRINSIC ir<%call> = call llvm.sadd.with.overflow(ir<%val_a>, ir<%val_b>)
+; CHECK-COST-ARMPL: Cost of 4 for VF vscale x 4: WIDEN-INTRINSIC ir<%call> = call llvm.sadd.with.overflow(ir<%val_a>, ir<%val_b>)
+
+define void @sadd_with_overflow_i32(ptr noalias %in_a, ptr noalias %in_b, ptr noalias writeonly %out_result, ptr noalias writeonly %out_overflow) {
+; CHECK-LABEL: define void @sadd_with_overflow_i32(
+; CHECK-SAME: ptr noalias [[IN_A:%.*]], ptr noalias [[IN_B:%.*]], ptr noalias writeonly [[OUT_RESULT:%.*]], ptr noalias writeonly [[OUT_OVERFLOW:%.*]]) #[[ATTR0]] {
+; CHECK:  [[ENTRY:.*:]]
+; CHECK:  [[VECTOR_PH:.*:]]
+; CHECK:  [[VECTOR_BODY:.*:]]
+; CHECK:    [[TMP9:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.sadd.with.overflow.nxv4i32(<vscale x 4 x i32> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD1:%.*]])
+; CHECK:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } [[TMP9]], 0
+; CHECK:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } [[TMP9]], 1
+; CHECK:    call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP10]], ptr align 4 [[TMP13:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; CHECK:    call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP12:%.*]], ptr align 1 [[TMP14:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK:  [[MIDDLE_BLOCK:.*:]]
+; CHECK:  [[EXIT:.*:]]
+;
+; CHECK-ARMPL-LABEL: define void @sadd_with_overflow_i32(
+; CHECK-ARMPL-SAME: ptr noalias [[IN_A:%.*]], ptr noalias [[IN_B:%.*]], ptr noalias writeonly [[OUT_RESULT:%.*]], ptr noalias writeonly [[OUT_OVERFLOW:%.*]]) #[[ATTR0]] {
+; CHECK-ARMPL:  [[ENTRY:.*:]]
+; CHECK-ARMPL:  [[VECTOR_PH:.*:]]
+; CHECK-ARMPL:  [[VECTOR_BODY:.*:]]
+; CHECK-ARMPL:    [[TMP9:%.*]] = call { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.sadd.with.overflow.nxv4i32(<vscale x 4 x i32> [[WIDE_MASKED_LOAD:%.*]], <vscale x 4 x i32> [[WIDE_MASKED_LOAD1:%.*]])
+; CHECK-ARMPL:    [[TMP10:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } [[TMP9]], 0
+; CHECK-ARMPL:    [[TMP11:%.*]] = extractvalue { <vscale x 4 x i32>, <vscale x 4 x i1> } [[TMP9]], 1
+; CHECK-ARMPL:    call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[TMP10]], ptr align 4 [[TMP13:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK:%.*]])
+; CHECK-ARMPL:    call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP12:%.*]], ptr align 1 [[TMP14:%.*]], <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
+; CHECK-ARMPL:  [[MIDDLE_BLOCK:.*:]]
+; CHECK-ARMPL:  [[EXIT:.*:]]
+;
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %arrayidx_a = getelementptr inbounds i32, ptr %in_a, i64 %iv
+  %val_a = load i32, ptr %arrayidx_a, align 4
+  %arrayidx_b = getelementptr inbounds i32, ptr %in_b, i64 %iv
+  %val_b = load i32, ptr %arrayidx_b, align 4
+  %call = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %val_a, i32 %val_b)
+  %result = extractvalue { i32, i1 } %call, 0
+  %overflow = extractvalue { i32, i1 } %call, 1
+  %zext_overflow = zext i1 %overflow to i8
+  %arrayidx_result = getelementptr inbounds i32, ptr %out_result, i64 %iv
+  store i32 %result, ptr %arrayidx_result, align 4
+  %arrayidx_overflow = getelementptr inbounds i8, ptr %out_overflow, i64 %iv
+  store i8 %zext_overflow, ptr %arrayidx_overflow, align 1
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, 1024
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  ret void
+}
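
With SVE enabled, the same widening applies at scalable VFs, as the checks
above expect; the widened call takes the shape:

    %r = call { <vscale x 4 x i32>, <vscale x 4 x i1> } @llvm.sadd.with.overflow.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)

Only VF vscale x 1 remains a REPLICATE recipe with Invalid cost, since a
scalable vector cannot be scalarized lane by lane at compile time.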

diff --git a/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll b/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll
index c6fcbed983d3c..f64d43adecfb8 100644
--- a/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll
+++ b/llvm/test/Transforms/LoopVectorize/multiple-result-intrinsics.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "(:|sincos|frexp|modf|extract|store)" --version 5
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --filter "(:|sincos|frexp|modf|extract|store|with\.overflow)" --version 5
 ; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=2 < %s -S -o - | FileCheck %s
 
 define void @sincos_f32(ptr noalias %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
@@ -348,3 +348,471 @@ for.body:
 exit:
   ret void
 }
+
+define void @uadd_with_overflow_i32(ptr noalias %in_a, ptr noalias %in_b, ptr noalias writeonly %out_result, ptr noalias writeonly %out_overflow) {
+; CHECK-LABEL: define void @uadd_with_overflow_i32(
+; CHECK-SAME: ptr noalias [[IN_A:%.*]], ptr noalias [[IN_B:%.*]], ptr noalias writeonly [[OUT_RESULT:%.*]], ptr noalias writeonly [[OUT_OVERFLOW:%.*]]) {
+; CHECK:  [[VECTOR_BODY:.*:]]
+; CHECK:  [[FOR_BODY:.*:]]
+; CHECK:  [[VECTOR_BODY1:.*:]]
+; CHECK:    [[TMP4:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.uadd.with.overflow.v2i32(<2 x i32> [[WIDE_LOAD:%.*]], <2 x i32> [[WIDE_LOAD1:%.*]])
+; CHECK:    [[TMP5:%.*]] = extractvalue { <2 x i32>, <2 x i1> } [[TMP4]], 0
+; CHECK:    [[TMP6:%.*]] = extractvalue { <2 x i32>, <2 x i1> } [[TMP4]], 1
+; CHECK:    store <2 x i32> [[TMP5]], ptr [[TMP9:%.*]], align 4
+; CHECK:    store <2 x i8> [[TMP8:%.*]], ptr [[TMP7:%.*]], align 1
+; CHECK:  [[EXIT:.*:]]
+; CHECK:  [[EXIT1:.*:]]
+;
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %arrayidx_a = getelementptr inbounds i32, ptr %in_a, i64 %iv
+  %val_a = load i32, ptr %arrayidx_a, align 4
+  %arrayidx_b = getelementptr inbounds i32, ptr %in_b, i64 %iv
+  %val_b = load i32, ptr %arrayidx_b, align 4
+  %call = tail call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %val_a, i32 %val_b)
+  %result = extractvalue { i32, i1 } %call, 0
+  %overflow = extractvalue { i32, i1 } %call, 1
+  %zext_overflow = zext i1 %overflow to i8
+  %arrayidx_result = getelementptr inbounds i32, ptr %out_result, i64 %iv
+  store i32 %result, ptr %arrayidx_result, align 4
+  %arrayidx_overflow = getelementptr inbounds i8, ptr %out_overflow, i64 %iv
+  store i8 %zext_overflow, ptr %arrayidx_overflow, align 1
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, 1024
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  ret void
+}
+
+define void @uadd_with_overflow_i64(ptr noalias %in_a, ptr noalias %in_b, ptr noalias writeonly %out_result, ptr noalias writeonly %out_overflow) {
+; CHECK-LABEL: define void @uadd_with_overflow_i64(
+; CHECK-SAME: ptr noalias [[IN_A:%.*]], ptr noalias [[IN_B:%.*]], ptr noalias writeonly [[OUT_RESULT:%.*]], ptr noalias writeonly [[OUT_OVERFLOW:%.*]]) {
+; CHECK:  [[VECTOR_BODY:.*:]]
+; CHECK:  [[FOR_BODY:.*:]]
+; CHECK:  [[VECTOR_BODY1:.*:]]
+; CHECK:    [[TMP4:%.*]] = call { <2 x i64>, <2 x i1> } @llvm.uadd.with.overflow.v2i64(<2 x i64> [[WIDE_LOAD:%.*]], <2 x i64> [[WIDE_LOAD1:%.*]])
+; CHECK:    [[TMP5:%.*]] = extractvalue { <2 x i64>, <2 x i1> } [[TMP4]], 0
+; CHECK:    [[TMP6:%.*]] = extractvalue { <2 x i64>, <2 x i1> } [[TMP4]], 1
+; CHECK:    store <2 x i64> [[TMP5]], ptr [[TMP9:%.*]], align 8
+; CHECK:    store <2 x i8> [[TMP8:%.*]], ptr [[TMP7:%.*]], align 1
+; CHECK:  [[EXIT:.*:]]
+; CHECK:  [[EXIT1:.*:]]
+;
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %arrayidx_a = getelementptr inbounds i64, ptr %in_a, i64 %iv
+  %val_a = load i64, ptr %arrayidx_a, align 8
+  %arrayidx_b = getelementptr inbounds i64, ptr %in_b, i64 %iv
+  %val_b = load i64, ptr %arrayidx_b, align 8
+  %call = tail call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %val_a, i64 %val_b)
+  %result = extractvalue { i64, i1 } %call, 0
+  %overflow = extractvalue { i64, i1 } %call, 1
+  %zext_overflow = zext i1 %overflow to i8
+  %arrayidx_result = getelementptr inbounds i64, ptr %out_result, i64 %iv
+  store i64 %result, ptr %arrayidx_result, align 8
+  %arrayidx_overflow = getelementptr inbounds i8, ptr %out_overflow, i64 %iv
+  store i8 %zext_overflow, ptr %arrayidx_overflow, align 1
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, 1024
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  ret void
+}
+
+define void @sadd_with_overflow_i32(ptr noalias %in_a, ptr noalias %in_b, ptr noalias writeonly %out_result, ptr noalias writeonly %out_overflow) {
+; CHECK-LABEL: define void @sadd_with_overflow_i32(
+; CHECK-SAME: ptr noalias [[IN_A:%.*]], ptr noalias [[IN_B:%.*]], ptr noalias writeonly [[OUT_RESULT:%.*]], ptr noalias writeonly [[OUT_OVERFLOW:%.*]]) {
+; CHECK:  [[VECTOR_BODY:.*:]]
+; CHECK:  [[FOR_BODY:.*:]]
+; CHECK:  [[VECTOR_BODY1:.*:]]
+; CHECK:    [[TMP4:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.sadd.with.overflow.v2i32(<2 x i32> [[WIDE_LOAD:%.*]], <2 x i32> [[WIDE_LOAD1:%.*]])
+; CHECK:    [[TMP5:%.*]] = extractvalue { <2 x i32>, <2 x i1> } [[TMP4]], 0
+; CHECK:    [[TMP6:%.*]] = extractvalue { <2 x i32>, <2 x i1> } [[TMP4]], 1
+; CHECK:    store <2 x i32> [[TMP5]], ptr [[TMP9:%.*]], align 4
+; CHECK:    store <2 x i8> [[TMP8:%.*]], ptr [[TMP7:%.*]], align 1
+; CHECK:  [[EXIT:.*:]]
+; CHECK:  [[EXIT1:.*:]]
+;
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %arrayidx_a = getelementptr inbounds i32, ptr %in_a, i64 %iv
+  %val_a = load i32, ptr %arrayidx_a, align 4
+  %arrayidx_b = getelementptr inbounds i32, ptr %in_b, i64 %iv
+  %val_b = load i32, ptr %arrayidx_b, align 4
+  %call = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %val_a, i32 %val_b)
+  %result = extractvalue { i32, i1 } %call, 0
+  %overflow = extractvalue { i32, i1 } %call, 1
+  %zext_overflow = zext i1 %overflow to i8
+  %arrayidx_result = getelementptr inbounds i32, ptr %out_result, i64 %iv
+  store i32 %result, ptr %arrayidx_result, align 4
+  %arrayidx_overflow = getelementptr inbounds i8, ptr %out_overflow, i64 %iv
+  store i8 %zext_overflow, ptr %arrayidx_overflow, align 1
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, 1024
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  ret void
+}
+
+define void @sadd_with_overflow_i64(ptr noalias %in_a, ptr noalias %in_b, ptr noalias writeonly %out_result, ptr noalias writeonly %out_overflow) {
+; CHECK-LABEL: define void @sadd_with_overflow_i64(
+; CHECK-SAME: ptr noalias [[IN_A:%.*]], ptr noalias [[IN_B:%.*]], ptr noalias writeonly [[OUT_RESULT:%.*]], ptr noalias writeonly [[OUT_OVERFLOW:%.*]]) {
+; CHECK:  [[VECTOR_BODY:.*:]]
+; CHECK:  [[FOR_BODY:.*:]]
+; CHECK:  [[VECTOR_BODY1:.*:]]
+; CHECK:    [[TMP4:%.*]] = call { <2 x i64>, <2 x i1> } @llvm.sadd.with.overflow.v2i64(<2 x i64> [[WIDE_LOAD:%.*]], <2 x i64> [[WIDE_LOAD1:%.*]])
+; CHECK:    [[TMP5:%.*]] = extractvalue { <2 x i64>, <2 x i1> } [[TMP4]], 0
+; CHECK:    [[TMP6:%.*]] = extractvalue { <2 x i64>, <2 x i1> } [[TMP4]], 1
+; CHECK:    store <2 x i64> [[TMP5]], ptr [[TMP9:%.*]], align 8
+; CHECK:    store <2 x i8> [[TMP8:%.*]], ptr [[TMP7:%.*]], align 1
+; CHECK:  [[EXIT:.*:]]
+; CHECK:  [[EXIT1:.*:]]
+;
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %arrayidx_a = getelementptr inbounds i64, ptr %in_a, i64 %iv
+  %val_a = load i64, ptr %arrayidx_a, align 8
+  %arrayidx_b = getelementptr inbounds i64, ptr %in_b, i64 %iv
+  %val_b = load i64, ptr %arrayidx_b, align 8
+  %call = tail call { i64, i1 } @llvm.sadd.with.overflow.i64(i64 %val_a, i64 %val_b)
+  %result = extractvalue { i64, i1 } %call, 0
+  %overflow = extractvalue { i64, i1 } %call, 1
+  %zext_overflow = zext i1 %overflow to i8
+  %arrayidx_result = getelementptr inbounds i64, ptr %out_result, i64 %iv
+  store i64 %result, ptr %arrayidx_result, align 8
+  %arrayidx_overflow = getelementptr inbounds i8, ptr %out_overflow, i64 %iv
+  store i8 %zext_overflow, ptr %arrayidx_overflow, align 1
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, 1024
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  ret void
+}
+
+define void @usub_with_overflow_i32(ptr noalias %in_a, ptr noalias %in_b, ptr noalias writeonly %out_result, ptr noalias writeonly %out_overflow) {
+; CHECK-LABEL: define void @usub_with_overflow_i32(
+; CHECK-SAME: ptr noalias [[IN_A:%.*]], ptr noalias [[IN_B:%.*]], ptr noalias writeonly [[OUT_RESULT:%.*]], ptr noalias writeonly [[OUT_OVERFLOW:%.*]]) {
+; CHECK:  [[VECTOR_BODY:.*:]]
+; CHECK:  [[FOR_BODY:.*:]]
+; CHECK:  [[VECTOR_BODY1:.*:]]
+; CHECK:    [[TMP4:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.usub.with.overflow.v2i32(<2 x i32> [[WIDE_LOAD:%.*]], <2 x i32> [[WIDE_LOAD1:%.*]])
+; CHECK:    [[TMP5:%.*]] = extractvalue { <2 x i32>, <2 x i1> } [[TMP4]], 0
+; CHECK:    [[TMP6:%.*]] = extractvalue { <2 x i32>, <2 x i1> } [[TMP4]], 1
+; CHECK:    store <2 x i32> [[TMP5]], ptr [[TMP9:%.*]], align 4
+; CHECK:    store <2 x i8> [[TMP8:%.*]], ptr [[TMP7:%.*]], align 1
+; CHECK:  [[EXIT:.*:]]
+; CHECK:  [[EXIT1:.*:]]
+;
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %arrayidx_a = getelementptr inbounds i32, ptr %in_a, i64 %iv
+  %val_a = load i32, ptr %arrayidx_a, align 4
+  %arrayidx_b = getelementptr inbounds i32, ptr %in_b, i64 %iv
+  %val_b = load i32, ptr %arrayidx_b, align 4
+  %call = tail call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %val_a, i32 %val_b)
+  %result = extractvalue { i32, i1 } %call, 0
+  %overflow = extractvalue { i32, i1 } %call, 1
+  %zext_overflow = zext i1 %overflow to i8
+  %arrayidx_result = getelementptr inbounds i32, ptr %out_result, i64 %iv
+  store i32 %result, ptr %arrayidx_result, align 4
+  %arrayidx_overflow = getelementptr inbounds i8, ptr %out_overflow, i64 %iv
+  store i8 %zext_overflow, ptr %arrayidx_overflow, align 1
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, 1024
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  ret void
+}
+
+define void @usub_with_overflow_i64(ptr noalias %in_a, ptr noalias %in_b, ptr noalias writeonly %out_result, ptr noalias writeonly %out_overflow) {
+; CHECK-LABEL: define void @usub_with_overflow_i64(
+; CHECK-SAME: ptr noalias [[IN_A:%.*]], ptr noalias [[IN_B:%.*]], ptr noalias writeonly [[OUT_RESULT:%.*]], ptr noalias writeonly [[OUT_OVERFLOW:%.*]]) {
+; CHECK:  [[VECTOR_BODY:.*:]]
+; CHECK:  [[FOR_BODY:.*:]]
+; CHECK:  [[VECTOR_BODY1:.*:]]
+; CHECK:    [[TMP4:%.*]] = call { <2 x i64>, <2 x i1> } @llvm.usub.with.overflow.v2i64(<2 x i64> [[WIDE_LOAD:%.*]], <2 x i64> [[WIDE_LOAD1:%.*]])
+; CHECK:    [[TMP5:%.*]] = extractvalue { <2 x i64>, <2 x i1> } [[TMP4]], 0
+; CHECK:    [[TMP6:%.*]] = extractvalue { <2 x i64>, <2 x i1> } [[TMP4]], 1
+; CHECK:    store <2 x i64> [[TMP5]], ptr [[TMP9:%.*]], align 8
+; CHECK:    store <2 x i8> [[TMP8:%.*]], ptr [[TMP7:%.*]], align 1
+; CHECK:  [[EXIT:.*:]]
+; CHECK:  [[EXIT1:.*:]]
+;
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %arrayidx_a = getelementptr inbounds i64, ptr %in_a, i64 %iv
+  %val_a = load i64, ptr %arrayidx_a, align 8
+  %arrayidx_b = getelementptr inbounds i64, ptr %in_b, i64 %iv
+  %val_b = load i64, ptr %arrayidx_b, align 8
+  %call = tail call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %val_a, i64 %val_b)
+  %result = extractvalue { i64, i1 } %call, 0
+  %overflow = extractvalue { i64, i1 } %call, 1
+  %zext_overflow = zext i1 %overflow to i8
+  %arrayidx_result = getelementptr inbounds i64, ptr %out_result, i64 %iv
+  store i64 %result, ptr %arrayidx_result, align 8
+  %arrayidx_overflow = getelementptr inbounds i8, ptr %out_overflow, i64 %iv
+  store i8 %zext_overflow, ptr %arrayidx_overflow, align 1
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, 1024
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  ret void
+}
+
+define void @ssub_with_overflow_i32(ptr noalias %in_a, ptr noalias %in_b, ptr noalias writeonly %out_result, ptr noalias writeonly %out_overflow) {
+; CHECK-LABEL: define void @ssub_with_overflow_i32(
+; CHECK-SAME: ptr noalias [[IN_A:%.*]], ptr noalias [[IN_B:%.*]], ptr noalias writeonly [[OUT_RESULT:%.*]], ptr noalias writeonly [[OUT_OVERFLOW:%.*]]) {
+; CHECK:  [[VECTOR_BODY:.*:]]
+; CHECK:  [[FOR_BODY:.*:]]
+; CHECK:  [[VECTOR_BODY1:.*:]]
+; CHECK:    [[TMP4:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.ssub.with.overflow.v2i32(<2 x i32> [[WIDE_LOAD:%.*]], <2 x i32> [[WIDE_LOAD1:%.*]])
+; CHECK:    [[TMP5:%.*]] = extractvalue { <2 x i32>, <2 x i1> } [[TMP4]], 0
+; CHECK:    [[TMP6:%.*]] = extractvalue { <2 x i32>, <2 x i1> } [[TMP4]], 1
+; CHECK:    store <2 x i32> [[TMP5]], ptr [[TMP9:%.*]], align 4
+; CHECK:    store <2 x i8> [[TMP8:%.*]], ptr [[TMP7:%.*]], align 1
+; CHECK:  [[EXIT:.*:]]
+; CHECK:  [[EXIT1:.*:]]
+;
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %arrayidx_a = getelementptr inbounds i32, ptr %in_a, i64 %iv
+  %val_a = load i32, ptr %arrayidx_a, align 4
+  %arrayidx_b = getelementptr inbounds i32, ptr %in_b, i64 %iv
+  %val_b = load i32, ptr %arrayidx_b, align 4
+  %call = tail call { i32, i1 } @llvm.ssub.with.overflow.i32(i32 %val_a, i32 %val_b)
+  %result = extractvalue { i32, i1 } %call, 0
+  %overflow = extractvalue { i32, i1 } %call, 1
+  %zext_overflow = zext i1 %overflow to i8
+  %arrayidx_result = getelementptr inbounds i32, ptr %out_result, i64 %iv
+  store i32 %result, ptr %arrayidx_result, align 4
+  %arrayidx_overflow = getelementptr inbounds i8, ptr %out_overflow, i64 %iv
+  store i8 %zext_overflow, ptr %arrayidx_overflow, align 1
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, 1024
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  ret void
+}
+
+define void @ssub_with_overflow_i64(ptr noalias %in_a, ptr noalias %in_b, ptr noalias writeonly %out_result, ptr noalias writeonly %out_overflow) {
+; CHECK-LABEL: define void @ssub_with_overflow_i64(
+; CHECK-SAME: ptr noalias [[IN_A:%.*]], ptr noalias [[IN_B:%.*]], ptr noalias writeonly [[OUT_RESULT:%.*]], ptr noalias writeonly [[OUT_OVERFLOW:%.*]]) {
+; CHECK:  [[VECTOR_BODY:.*:]]
+; CHECK:  [[FOR_BODY:.*:]]
+; CHECK:  [[VECTOR_BODY1:.*:]]
+; CHECK:    [[TMP4:%.*]] = call { <2 x i64>, <2 x i1> } @llvm.ssub.with.overflow.v2i64(<2 x i64> [[WIDE_LOAD:%.*]], <2 x i64> [[WIDE_LOAD1:%.*]])
+; CHECK:    [[TMP5:%.*]] = extractvalue { <2 x i64>, <2 x i1> } [[TMP4]], 0
+; CHECK:    [[TMP6:%.*]] = extractvalue { <2 x i64>, <2 x i1> } [[TMP4]], 1
+; CHECK:    store <2 x i64> [[TMP5]], ptr [[TMP9:%.*]], align 8
+; CHECK:    store <2 x i8> [[TMP8:%.*]], ptr [[TMP7:%.*]], align 1
+; CHECK:  [[EXIT:.*:]]
+; CHECK:  [[EXIT1:.*:]]
+;
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %arrayidx_a = getelementptr inbounds i64, ptr %in_a, i64 %iv
+  %val_a = load i64, ptr %arrayidx_a, align 8
+  %arrayidx_b = getelementptr inbounds i64, ptr %in_b, i64 %iv
+  %val_b = load i64, ptr %arrayidx_b, align 8
+  %call = tail call { i64, i1 } @llvm.ssub.with.overflow.i64(i64 %val_a, i64 %val_b)
+  %result = extractvalue { i64, i1 } %call, 0
+  %overflow = extractvalue { i64, i1 } %call, 1
+  %zext_overflow = zext i1 %overflow to i8
+  %arrayidx_result = getelementptr inbounds i64, ptr %out_result, i64 %iv
+  store i64 %result, ptr %arrayidx_result, align 8
+  %arrayidx_overflow = getelementptr inbounds i8, ptr %out_overflow, i64 %iv
+  store i8 %zext_overflow, ptr %arrayidx_overflow, align 1
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, 1024
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  ret void
+}
+
+define void @umul_with_overflow_i32(ptr noalias %in_a, ptr noalias %in_b, ptr noalias writeonly %out_result, ptr noalias writeonly %out_overflow) {
+; CHECK-LABEL: define void @umul_with_overflow_i32(
+; CHECK-SAME: ptr noalias [[IN_A:%.*]], ptr noalias [[IN_B:%.*]], ptr noalias writeonly [[OUT_RESULT:%.*]], ptr noalias writeonly [[OUT_OVERFLOW:%.*]]) {
+; CHECK:  [[VECTOR_BODY:.*:]]
+; CHECK:  [[FOR_BODY:.*:]]
+; CHECK:  [[VECTOR_BODY1:.*:]]
+; CHECK:    [[TMP4:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.umul.with.overflow.v2i32(<2 x i32> [[WIDE_LOAD:%.*]], <2 x i32> [[WIDE_LOAD1:%.*]])
+; CHECK:    [[TMP5:%.*]] = extractvalue { <2 x i32>, <2 x i1> } [[TMP4]], 0
+; CHECK:    [[TMP6:%.*]] = extractvalue { <2 x i32>, <2 x i1> } [[TMP4]], 1
+; CHECK:    store <2 x i32> [[TMP5]], ptr [[TMP9:%.*]], align 4
+; CHECK:    store <2 x i8> [[TMP8:%.*]], ptr [[TMP7:%.*]], align 1
+; CHECK:  [[EXIT:.*:]]
+; CHECK:  [[EXIT1:.*:]]
+;
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %arrayidx_a = getelementptr inbounds i32, ptr %in_a, i64 %iv
+  %val_a = load i32, ptr %arrayidx_a, align 4
+  %arrayidx_b = getelementptr inbounds i32, ptr %in_b, i64 %iv
+  %val_b = load i32, ptr %arrayidx_b, align 4
+  %call = tail call { i32, i1 } @llvm.umul.with.overflow.i32(i32 %val_a, i32 %val_b)
+  %result = extractvalue { i32, i1 } %call, 0
+  %overflow = extractvalue { i32, i1 } %call, 1
+  %zext_overflow = zext i1 %overflow to i8
+  %arrayidx_result = getelementptr inbounds i32, ptr %out_result, i64 %iv
+  store i32 %result, ptr %arrayidx_result, align 4
+  %arrayidx_overflow = getelementptr inbounds i8, ptr %out_overflow, i64 %iv
+  store i8 %zext_overflow, ptr %arrayidx_overflow, align 1
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, 1024
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  ret void
+}
+
+define void @umul_with_overflow_i64(ptr noalias %in_a, ptr noalias %in_b, ptr noalias writeonly %out_result, ptr noalias writeonly %out_overflow) {
+; CHECK-LABEL: define void @umul_with_overflow_i64(
+; CHECK-SAME: ptr noalias [[IN_A:%.*]], ptr noalias [[IN_B:%.*]], ptr noalias writeonly [[OUT_RESULT:%.*]], ptr noalias writeonly [[OUT_OVERFLOW:%.*]]) {
+; CHECK:  [[VECTOR_BODY:.*:]]
+; CHECK:  [[FOR_BODY:.*:]]
+; CHECK:  [[VECTOR_BODY1:.*:]]
+; CHECK:    [[TMP4:%.*]] = call { <2 x i64>, <2 x i1> } @llvm.umul.with.overflow.v2i64(<2 x i64> [[WIDE_LOAD:%.*]], <2 x i64> [[WIDE_LOAD1:%.*]])
+; CHECK:    [[TMP5:%.*]] = extractvalue { <2 x i64>, <2 x i1> } [[TMP4]], 0
+; CHECK:    [[TMP6:%.*]] = extractvalue { <2 x i64>, <2 x i1> } [[TMP4]], 1
+; CHECK:    store <2 x i64> [[TMP5]], ptr [[TMP9:%.*]], align 8
+; CHECK:    store <2 x i8> [[TMP8:%.*]], ptr [[TMP7:%.*]], align 1
+; CHECK:  [[EXIT:.*:]]
+; CHECK:  [[EXIT1:.*:]]
+;
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %arrayidx_a = getelementptr inbounds i64, ptr %in_a, i64 %iv
+  %val_a = load i64, ptr %arrayidx_a, align 8
+  %arrayidx_b = getelementptr inbounds i64, ptr %in_b, i64 %iv
+  %val_b = load i64, ptr %arrayidx_b, align 8
+  %call = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %val_a, i64 %val_b)
+  %result = extractvalue { i64, i1 } %call, 0
+  %overflow = extractvalue { i64, i1 } %call, 1
+  %zext_overflow = zext i1 %overflow to i8
+  %arrayidx_result = getelementptr inbounds i64, ptr %out_result, i64 %iv
+  store i64 %result, ptr %arrayidx_result, align 8
+  %arrayidx_overflow = getelementptr inbounds i8, ptr %out_overflow, i64 %iv
+  store i8 %zext_overflow, ptr %arrayidx_overflow, align 1
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, 1024
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  ret void
+}
+
+define void @smul_with_overflow_i32(ptr noalias %in_a, ptr noalias %in_b, ptr noalias writeonly %out_result, ptr noalias writeonly %out_overflow) {
+; CHECK-LABEL: define void @smul_with_overflow_i32(
+; CHECK-SAME: ptr noalias [[IN_A:%.*]], ptr noalias [[IN_B:%.*]], ptr noalias writeonly [[OUT_RESULT:%.*]], ptr noalias writeonly [[OUT_OVERFLOW:%.*]]) {
+; CHECK:  [[VECTOR_BODY:.*:]]
+; CHECK:  [[FOR_BODY:.*:]]
+; CHECK:  [[VECTOR_BODY1:.*:]]
+; CHECK:    [[TMP4:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.smul.with.overflow.v2i32(<2 x i32> [[WIDE_LOAD:%.*]], <2 x i32> [[WIDE_LOAD1:%.*]])
+; CHECK:    [[TMP5:%.*]] = extractvalue { <2 x i32>, <2 x i1> } [[TMP4]], 0
+; CHECK:    [[TMP6:%.*]] = extractvalue { <2 x i32>, <2 x i1> } [[TMP4]], 1
+; CHECK:    store <2 x i32> [[TMP5]], ptr [[TMP9:%.*]], align 4
+; CHECK:    store <2 x i8> [[TMP8:%.*]], ptr [[TMP7:%.*]], align 1
+; CHECK:  [[EXIT:.*:]]
+; CHECK:  [[EXIT1:.*:]]
+;
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %arrayidx_a = getelementptr inbounds i32, ptr %in_a, i64 %iv
+  %val_a = load i32, ptr %arrayidx_a, align 4
+  %arrayidx_b = getelementptr inbounds i32, ptr %in_b, i64 %iv
+  %val_b = load i32, ptr %arrayidx_b, align 4
+  %call = tail call { i32, i1 } @llvm.smul.with.overflow.i32(i32 %val_a, i32 %val_b)
+  %result = extractvalue { i32, i1 } %call, 0
+  %overflow = extractvalue { i32, i1 } %call, 1
+  %zext_overflow = zext i1 %overflow to i8
+  %arrayidx_result = getelementptr inbounds i32, ptr %out_result, i64 %iv
+  store i32 %result, ptr %arrayidx_result, align 4
+  %arrayidx_overflow = getelementptr inbounds i8, ptr %out_overflow, i64 %iv
+  store i8 %zext_overflow, ptr %arrayidx_overflow, align 1
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, 1024
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  ret void
+}
+
+define void @smul_with_overflow_i64(ptr noalias %in_a, ptr noalias %in_b, ptr noalias writeonly %out_result, ptr noalias writeonly %out_overflow) {
+; CHECK-LABEL: define void @smul_with_overflow_i64(
+; CHECK-SAME: ptr noalias [[IN_A:%.*]], ptr noalias [[IN_B:%.*]], ptr noalias writeonly [[OUT_RESULT:%.*]], ptr noalias writeonly [[OUT_OVERFLOW:%.*]]) {
+; CHECK:  [[VECTOR_BODY:.*:]]
+; CHECK:  [[FOR_BODY:.*:]]
+; CHECK:  [[VECTOR_BODY1:.*:]]
+; CHECK:    [[TMP4:%.*]] = call { <2 x i64>, <2 x i1> } @llvm.smul.with.overflow.v2i64(<2 x i64> [[WIDE_LOAD:%.*]], <2 x i64> [[WIDE_LOAD1:%.*]])
+; CHECK:    [[TMP5:%.*]] = extractvalue { <2 x i64>, <2 x i1> } [[TMP4]], 0
+; CHECK:    [[TMP6:%.*]] = extractvalue { <2 x i64>, <2 x i1> } [[TMP4]], 1
+; CHECK:    store <2 x i64> [[TMP5]], ptr [[TMP9:%.*]], align 8
+; CHECK:    store <2 x i8> [[TMP8:%.*]], ptr [[TMP7:%.*]], align 1
+; CHECK:  [[EXIT:.*:]]
+; CHECK:  [[EXIT1:.*:]]
+;
+entry:
+  br label %for.body
+
+for.body:
+  %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+  %arrayidx_a = getelementptr inbounds i64, ptr %in_a, i64 %iv
+  %val_a = load i64, ptr %arrayidx_a, align 8
+  %arrayidx_b = getelementptr inbounds i64, ptr %in_b, i64 %iv
+  %val_b = load i64, ptr %arrayidx_b, align 8
+  %call = tail call { i64, i1 } @llvm.smul.with.overflow.i64(i64 %val_a, i64 %val_b)
+  %result = extractvalue { i64, i1 } %call, 0
+  %overflow = extractvalue { i64, i1 } %call, 1
+  %zext_overflow = zext i1 %overflow to i8
+  %arrayidx_result = getelementptr inbounds i64, ptr %out_result, i64 %iv
+  store i64 %result, ptr %arrayidx_result, align 8
+  %arrayidx_overflow = getelementptr inbounds i8, ptr %out_overflow, i64 %iv
+  store i8 %zext_overflow, ptr %arrayidx_overflow, align 1
+  %iv.next = add nuw nsw i64 %iv, 1
+  %exitcond.not = icmp eq i64 %iv.next, 1024
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  ret void
+}

diff --git a/llvm/test/Transforms/LoopVectorize/struct-return.ll b/llvm/test/Transforms/LoopVectorize/struct-return.ll
index 83c87f1e15e8f..2f8acd641b571 100644
--- a/llvm/test/Transforms/LoopVectorize/struct-return.ll
+++ b/llvm/test/Transforms/LoopVectorize/struct-return.ll
@@ -166,28 +166,31 @@ exit:
   ret void
 }
 
-; TODO: Allow mixed-struct type vectorization and mark overflow intrinsics as trivially vectorizable.
-; CHECK-REMARKS:         remark: {{.*}} loop not vectorized: call instruction cannot be vectorized
+; CHECK-REMARKS:	 remark: {{.*}} vectorized loop
 define void @test_overflow_intrinsic(ptr noalias readonly %in, ptr noalias writeonly %out_a, ptr noalias writeonly %out_b) {
 ; CHECK-LABEL: define void @test_overflow_intrinsic(
 ; CHECK-SAME: ptr noalias readonly [[IN:%.*]], ptr noalias writeonly [[OUT_A:%.*]], ptr noalias writeonly [[OUT_B:%.*]]) {
-; CHECK-NEXT:  [[ENTRY:.*]]:
-; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
-; CHECK:       [[FOR_BODY]]:
-; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[FOR_BODY]] ]
+; CHECK-NEXT:  [[ENTRY:.*:]]
+; CHECK-NEXT:    br label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[IV:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[IV_NEXT:%.*]], %[[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds float, ptr [[IN]], i64 [[IV]]
-; CHECK-NEXT:    [[IN_VAL:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT:    [[CALL:%.*]] = tail call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 [[IN_VAL]], i32 [[IN_VAL]])
-; CHECK-NEXT:    [[EXTRACT_RET:%.*]] = extractvalue { i32, i1 } [[CALL]], 0
-; CHECK-NEXT:    [[EXTRACT_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[CALL]], 1
-; CHECK-NEXT:    [[ZEXT_OVERFLOW:%.*]] = zext i1 [[EXTRACT_OVERFLOW]] to i8
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <2 x i32>, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = call { <2 x i32>, <2 x i1> } @llvm.sadd.with.overflow.v2i32(<2 x i32> [[WIDE_LOAD]], <2 x i32> [[WIDE_LOAD]])
+; CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { <2 x i32>, <2 x i1> } [[TMP1]], 0
+; CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { <2 x i32>, <2 x i1> } [[TMP1]], 1
+; CHECK-NEXT:    [[TMP4:%.*]] = zext <2 x i1> [[TMP3]] to <2 x i8>
 ; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[OUT_A]], i64 [[IV]]
-; CHECK-NEXT:    store i32 [[EXTRACT_RET]], ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    store <2 x i32> [[TMP2]], ptr [[ARRAYIDX2]], align 4
 ; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[OUT_B]], i64 [[IV]]
-; CHECK-NEXT:    store i8 [[ZEXT_OVERFLOW]], ptr [[ARRAYIDX4]], align 4
-; CHECK-NEXT:    [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT:    store <2 x i8> [[TMP4]], ptr [[ARRAYIDX4]], align 4
+; CHECK-NEXT:    [[IV_NEXT]] = add nuw i64 [[IV]], 2
 ; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
-; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[EXIT:.*]], label %[[FOR_BODY]]
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    br label %[[EXIT:.*]]
 ; CHECK:       [[EXIT]]:
 ; CHECK-NEXT:    ret void
 ;
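
The new code paths can be exercised locally with the generic test's
invocation (its RUN line quoted above; lit substitutes %s with the test
file):

    opt -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=2 < %s -S -o - | FileCheck %s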


        

