[llvm] 2db46db - [SLP] Add tests for awkward load orders from SLP. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Sat May 7 02:27:36 PDT 2022


Author: David Green
Date: 2022-05-07T10:27:32+01:00
New Revision: 2db46db54d8ae6d3cfc30030768b53637d88e137

URL: https://github.com/llvm/llvm-project/commit/2db46db54d8ae6d3cfc30030768b53637d88e137
DIFF: https://github.com/llvm/llvm-project/commit/2db46db54d8ae6d3cfc30030768b53637d88e137.diff

LOG: [SLP] Add tests for awkward load orders from SLP. NFC

Added: 
    llvm/test/Transforms/SLPVectorizer/AArch64/loadorder.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/SLPVectorizer/AArch64/loadorder.ll b/llvm/test/Transforms/SLPVectorizer/AArch64/loadorder.ll
new file mode 100644
index 0000000000000..e02afa6cf6ed9
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/AArch64/loadorder.ll
@@ -0,0 +1,2168 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -slp-vectorizer -instcombine -mtriple=aarch64--linux-gnu < %s | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64"
+
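+; reduce_allstrided: every element of x and y is loaded at a multiple of
+; the runtime %stride (x[i*stride], y[i*stride] for i = 0..7), so no two
+; loads are provably consecutive; the checks show the whole multiply-add
+; reduction staying scalar.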
+define i16 @reduce_allstrided(i16* nocapture noundef readonly %x, i16* nocapture noundef readonly %y, i32 noundef %stride) {
+; CHECK-LABEL: @reduce_allstrided(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i16, i16* [[X:%.*]], align 2
+; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[STRIDE:%.*]] to i64
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i16, i16* [[ARRAYIDX1]], align 2
+; CHECK-NEXT:    [[MUL2:%.*]] = shl nsw i32 [[STRIDE]], 1
+; CHECK-NEXT:    [[IDXPROM3:%.*]] = sext i32 [[MUL2]] to i64
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM3]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i16, i16* [[ARRAYIDX4]], align 2
+; CHECK-NEXT:    [[MUL5:%.*]] = mul nsw i32 [[STRIDE]], 3
+; CHECK-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[MUL5]] to i64
+; CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM6]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
+; CHECK-NEXT:    [[MUL8:%.*]] = shl nsw i32 [[STRIDE]], 2
+; CHECK-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[MUL8]] to i64
+; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM9]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i16, i16* [[ARRAYIDX10]], align 2
+; CHECK-NEXT:    [[MUL11:%.*]] = mul nsw i32 [[STRIDE]], 5
+; CHECK-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[MUL11]] to i64
+; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM12]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load i16, i16* [[ARRAYIDX13]], align 2
+; CHECK-NEXT:    [[MUL14:%.*]] = mul nsw i32 [[STRIDE]], 6
+; CHECK-NEXT:    [[IDXPROM15:%.*]] = sext i32 [[MUL14]] to i64
+; CHECK-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM15]]
+; CHECK-NEXT:    [[TMP6:%.*]] = load i16, i16* [[ARRAYIDX16]], align 2
+; CHECK-NEXT:    [[MUL17:%.*]] = mul nsw i32 [[STRIDE]], 7
+; CHECK-NEXT:    [[IDXPROM18:%.*]] = sext i32 [[MUL17]] to i64
+; CHECK-NEXT:    [[ARRAYIDX19:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM18]]
+; CHECK-NEXT:    [[TMP7:%.*]] = load i16, i16* [[ARRAYIDX19]], align 2
+; CHECK-NEXT:    [[TMP8:%.*]] = load i16, i16* [[Y:%.*]], align 2
+; CHECK-NEXT:    [[ARRAYIDX23:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM]]
+; CHECK-NEXT:    [[TMP9:%.*]] = load i16, i16* [[ARRAYIDX23]], align 2
+; CHECK-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM3]]
+; CHECK-NEXT:    [[TMP10:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2
+; CHECK-NEXT:    [[ARRAYIDX29:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM6]]
+; CHECK-NEXT:    [[TMP11:%.*]] = load i16, i16* [[ARRAYIDX29]], align 2
+; CHECK-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM9]]
+; CHECK-NEXT:    [[TMP12:%.*]] = load i16, i16* [[ARRAYIDX32]], align 2
+; CHECK-NEXT:    [[ARRAYIDX35:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM12]]
+; CHECK-NEXT:    [[TMP13:%.*]] = load i16, i16* [[ARRAYIDX35]], align 2
+; CHECK-NEXT:    [[ARRAYIDX38:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM15]]
+; CHECK-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX38]], align 2
+; CHECK-NEXT:    [[ARRAYIDX41:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM18]]
+; CHECK-NEXT:    [[TMP15:%.*]] = load i16, i16* [[ARRAYIDX41]], align 2
+; CHECK-NEXT:    [[MUL43:%.*]] = mul i16 [[TMP8]], [[TMP0]]
+; CHECK-NEXT:    [[MUL48:%.*]] = mul i16 [[TMP9]], [[TMP1]]
+; CHECK-NEXT:    [[ADD49:%.*]] = add i16 [[MUL48]], [[MUL43]]
+; CHECK-NEXT:    [[MUL54:%.*]] = mul i16 [[TMP10]], [[TMP2]]
+; CHECK-NEXT:    [[ADD55:%.*]] = add i16 [[ADD49]], [[MUL54]]
+; CHECK-NEXT:    [[MUL60:%.*]] = mul i16 [[TMP11]], [[TMP3]]
+; CHECK-NEXT:    [[ADD61:%.*]] = add i16 [[ADD55]], [[MUL60]]
+; CHECK-NEXT:    [[MUL66:%.*]] = mul i16 [[TMP12]], [[TMP4]]
+; CHECK-NEXT:    [[ADD67:%.*]] = add i16 [[ADD61]], [[MUL66]]
+; CHECK-NEXT:    [[MUL72:%.*]] = mul i16 [[TMP13]], [[TMP5]]
+; CHECK-NEXT:    [[ADD73:%.*]] = add i16 [[ADD67]], [[MUL72]]
+; CHECK-NEXT:    [[MUL78:%.*]] = mul i16 [[TMP14]], [[TMP6]]
+; CHECK-NEXT:    [[ADD79:%.*]] = add i16 [[ADD73]], [[MUL78]]
+; CHECK-NEXT:    [[MUL84:%.*]] = mul i16 [[TMP15]], [[TMP7]]
+; CHECK-NEXT:    [[ADD85:%.*]] = add i16 [[ADD79]], [[MUL84]]
+; CHECK-NEXT:    ret i16 [[ADD85]]
+;
+entry:
+  %0 = load i16, i16* %x, align 2
+  %idxprom = sext i32 %stride to i64
+  %arrayidx1 = getelementptr inbounds i16, i16* %x, i64 %idxprom
+  %1 = load i16, i16* %arrayidx1, align 2
+  %mul2 = shl nsw i32 %stride, 1
+  %idxprom3 = sext i32 %mul2 to i64
+  %arrayidx4 = getelementptr inbounds i16, i16* %x, i64 %idxprom3
+  %2 = load i16, i16* %arrayidx4, align 2
+  %mul5 = mul nsw i32 %stride, 3
+  %idxprom6 = sext i32 %mul5 to i64
+  %arrayidx7 = getelementptr inbounds i16, i16* %x, i64 %idxprom6
+  %3 = load i16, i16* %arrayidx7, align 2
+  %mul8 = shl nsw i32 %stride, 2
+  %idxprom9 = sext i32 %mul8 to i64
+  %arrayidx10 = getelementptr inbounds i16, i16* %x, i64 %idxprom9
+  %4 = load i16, i16* %arrayidx10, align 2
+  %mul11 = mul nsw i32 %stride, 5
+  %idxprom12 = sext i32 %mul11 to i64
+  %arrayidx13 = getelementptr inbounds i16, i16* %x, i64 %idxprom12
+  %5 = load i16, i16* %arrayidx13, align 2
+  %mul14 = mul nsw i32 %stride, 6
+  %idxprom15 = sext i32 %mul14 to i64
+  %arrayidx16 = getelementptr inbounds i16, i16* %x, i64 %idxprom15
+  %6 = load i16, i16* %arrayidx16, align 2
+  %mul17 = mul nsw i32 %stride, 7
+  %idxprom18 = sext i32 %mul17 to i64
+  %arrayidx19 = getelementptr inbounds i16, i16* %x, i64 %idxprom18
+  %7 = load i16, i16* %arrayidx19, align 2
+  %8 = load i16, i16* %y, align 2
+  %arrayidx23 = getelementptr inbounds i16, i16* %y, i64 %idxprom
+  %9 = load i16, i16* %arrayidx23, align 2
+  %arrayidx26 = getelementptr inbounds i16, i16* %y, i64 %idxprom3
+  %10 = load i16, i16* %arrayidx26, align 2
+  %arrayidx29 = getelementptr inbounds i16, i16* %y, i64 %idxprom6
+  %11 = load i16, i16* %arrayidx29, align 2
+  %arrayidx32 = getelementptr inbounds i16, i16* %y, i64 %idxprom9
+  %12 = load i16, i16* %arrayidx32, align 2
+  %arrayidx35 = getelementptr inbounds i16, i16* %y, i64 %idxprom12
+  %13 = load i16, i16* %arrayidx35, align 2
+  %arrayidx38 = getelementptr inbounds i16, i16* %y, i64 %idxprom15
+  %14 = load i16, i16* %arrayidx38, align 2
+  %arrayidx41 = getelementptr inbounds i16, i16* %y, i64 %idxprom18
+  %15 = load i16, i16* %arrayidx41, align 2
+  %mul43 = mul i16 %8, %0
+  %mul48 = mul i16 %9, %1
+  %add49 = add i16 %mul48, %mul43
+  %mul54 = mul i16 %10, %2
+  %add55 = add i16 %add49, %mul54
+  %mul60 = mul i16 %11, %3
+  %add61 = add i16 %add55, %mul60
+  %mul66 = mul i16 %12, %4
+  %add67 = add i16 %add61, %mul66
+  %mul72 = mul i16 %13, %5
+  %add73 = add i16 %add67, %mul72
+  %mul78 = mul i16 %14, %6
+  %add79 = add i16 %add73, %mul78
+  %mul84 = mul i16 %15, %7
+  %add85 = add i16 %add79, %mul84
+  ret i16 %add85
+}
+
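+; reduce_blockstrided2: pairs of consecutive loads x[b], x[b+1] at block
+; offsets b = 0, stride, 2*stride, 3*stride (likewise for y), with the
+; products accumulated in an interleaved order; the checks show this
+; staying scalar too.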
+define i16 @reduce_blockstrided2(i16* nocapture noundef readonly %x, i16* nocapture noundef readonly %y, i32 noundef %stride) {
+; CHECK-LABEL: @reduce_blockstrided2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load i16, i16* [[X:%.*]], align 2
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load i16, i16* [[ARRAYIDX1]], align 2
+; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[STRIDE:%.*]] to i64
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
+; CHECK-NEXT:    [[ADD3:%.*]] = add nsw i32 [[STRIDE]], 1
+; CHECK-NEXT:    [[IDXPROM4:%.*]] = sext i32 [[ADD3]] to i64
+; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM4]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load i16, i16* [[ARRAYIDX5]], align 2
+; CHECK-NEXT:    [[MUL:%.*]] = shl nsw i32 [[STRIDE]], 1
+; CHECK-NEXT:    [[IDXPROM7:%.*]] = sext i32 [[MUL]] to i64
+; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM7]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i16, i16* [[ARRAYIDX8]], align 2
+; CHECK-NEXT:    [[ADD10:%.*]] = or i32 [[MUL]], 1
+; CHECK-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[ADD10]] to i64
+; CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM11]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load i16, i16* [[ARRAYIDX12]], align 2
+; CHECK-NEXT:    [[MUL13:%.*]] = mul nsw i32 [[STRIDE]], 3
+; CHECK-NEXT:    [[IDXPROM15:%.*]] = sext i32 [[MUL13]] to i64
+; CHECK-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM15]]
+; CHECK-NEXT:    [[TMP6:%.*]] = load i16, i16* [[ARRAYIDX16]], align 2
+; CHECK-NEXT:    [[ADD18:%.*]] = add nsw i32 [[MUL13]], 1
+; CHECK-NEXT:    [[IDXPROM19:%.*]] = sext i32 [[ADD18]] to i64
+; CHECK-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM19]]
+; CHECK-NEXT:    [[TMP7:%.*]] = load i16, i16* [[ARRAYIDX20]], align 2
+; CHECK-NEXT:    [[TMP8:%.*]] = load i16, i16* [[Y:%.*]], align 2
+; CHECK-NEXT:    [[ARRAYIDX24:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM]]
+; CHECK-NEXT:    [[TMP9:%.*]] = load i16, i16* [[ARRAYIDX24]], align 2
+; CHECK-NEXT:    [[ARRAYIDX28:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM7]]
+; CHECK-NEXT:    [[TMP10:%.*]] = load i16, i16* [[ARRAYIDX28]], align 2
+; CHECK-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM15]]
+; CHECK-NEXT:    [[TMP11:%.*]] = load i16, i16* [[ARRAYIDX32]], align 2
+; CHECK-NEXT:    [[ARRAYIDX33:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 1
+; CHECK-NEXT:    [[TMP12:%.*]] = load i16, i16* [[ARRAYIDX33]], align 2
+; CHECK-NEXT:    [[ARRAYIDX36:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM4]]
+; CHECK-NEXT:    [[TMP13:%.*]] = load i16, i16* [[ARRAYIDX36]], align 2
+; CHECK-NEXT:    [[ARRAYIDX40:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM11]]
+; CHECK-NEXT:    [[TMP14:%.*]] = load i16, i16* [[ARRAYIDX40]], align 2
+; CHECK-NEXT:    [[ARRAYIDX44:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM19]]
+; CHECK-NEXT:    [[TMP15:%.*]] = load i16, i16* [[ARRAYIDX44]], align 2
+; CHECK-NEXT:    [[MUL46:%.*]] = mul i16 [[TMP8]], [[TMP0]]
+; CHECK-NEXT:    [[MUL52:%.*]] = mul i16 [[TMP12]], [[TMP1]]
+; CHECK-NEXT:    [[MUL58:%.*]] = mul i16 [[TMP9]], [[TMP2]]
+; CHECK-NEXT:    [[MUL64:%.*]] = mul i16 [[TMP13]], [[TMP3]]
+; CHECK-NEXT:    [[MUL70:%.*]] = mul i16 [[TMP10]], [[TMP4]]
+; CHECK-NEXT:    [[MUL76:%.*]] = mul i16 [[TMP14]], [[TMP5]]
+; CHECK-NEXT:    [[MUL82:%.*]] = mul i16 [[TMP11]], [[TMP6]]
+; CHECK-NEXT:    [[MUL88:%.*]] = mul i16 [[TMP15]], [[TMP7]]
+; CHECK-NEXT:    [[ADD53:%.*]] = add i16 [[MUL58]], [[MUL46]]
+; CHECK-NEXT:    [[ADD59:%.*]] = add i16 [[ADD53]], [[MUL70]]
+; CHECK-NEXT:    [[ADD65:%.*]] = add i16 [[ADD59]], [[MUL82]]
+; CHECK-NEXT:    [[ADD71:%.*]] = add i16 [[ADD65]], [[MUL52]]
+; CHECK-NEXT:    [[ADD77:%.*]] = add i16 [[ADD71]], [[MUL64]]
+; CHECK-NEXT:    [[ADD83:%.*]] = add i16 [[ADD77]], [[MUL76]]
+; CHECK-NEXT:    [[ADD89:%.*]] = add i16 [[ADD83]], [[MUL88]]
+; CHECK-NEXT:    ret i16 [[ADD89]]
+;
+entry:
+  %0 = load i16, i16* %x, align 2
+  %arrayidx1 = getelementptr inbounds i16, i16* %x, i64 1
+  %1 = load i16, i16* %arrayidx1, align 2
+  %idxprom = sext i32 %stride to i64
+  %arrayidx2 = getelementptr inbounds i16, i16* %x, i64 %idxprom
+  %2 = load i16, i16* %arrayidx2, align 2
+  %add3 = add nsw i32 %stride, 1
+  %idxprom4 = sext i32 %add3 to i64
+  %arrayidx5 = getelementptr inbounds i16, i16* %x, i64 %idxprom4
+  %3 = load i16, i16* %arrayidx5, align 2
+  %mul = shl nsw i32 %stride, 1
+  %idxprom7 = sext i32 %mul to i64
+  %arrayidx8 = getelementptr inbounds i16, i16* %x, i64 %idxprom7
+  %4 = load i16, i16* %arrayidx8, align 2
+  %add10 = or i32 %mul, 1
+  %idxprom11 = sext i32 %add10 to i64
+  %arrayidx12 = getelementptr inbounds i16, i16* %x, i64 %idxprom11
+  %5 = load i16, i16* %arrayidx12, align 2
+  %mul13 = mul nsw i32 %stride, 3
+  %idxprom15 = sext i32 %mul13 to i64
+  %arrayidx16 = getelementptr inbounds i16, i16* %x, i64 %idxprom15
+  %6 = load i16, i16* %arrayidx16, align 2
+  %add18 = add nsw i32 %mul13, 1
+  %idxprom19 = sext i32 %add18 to i64
+  %arrayidx20 = getelementptr inbounds i16, i16* %x, i64 %idxprom19
+  %7 = load i16, i16* %arrayidx20, align 2
+  %8 = load i16, i16* %y, align 2
+  %arrayidx24 = getelementptr inbounds i16, i16* %y, i64 %idxprom
+  %9 = load i16, i16* %arrayidx24, align 2
+  %arrayidx28 = getelementptr inbounds i16, i16* %y, i64 %idxprom7
+  %10 = load i16, i16* %arrayidx28, align 2
+  %arrayidx32 = getelementptr inbounds i16, i16* %y, i64 %idxprom15
+  %11 = load i16, i16* %arrayidx32, align 2
+  %arrayidx33 = getelementptr inbounds i16, i16* %y, i64 1
+  %12 = load i16, i16* %arrayidx33, align 2
+  %arrayidx36 = getelementptr inbounds i16, i16* %y, i64 %idxprom4
+  %13 = load i16, i16* %arrayidx36, align 2
+  %arrayidx40 = getelementptr inbounds i16, i16* %y, i64 %idxprom11
+  %14 = load i16, i16* %arrayidx40, align 2
+  %arrayidx44 = getelementptr inbounds i16, i16* %y, i64 %idxprom19
+  %15 = load i16, i16* %arrayidx44, align 2
+  %mul46 = mul i16 %8, %0
+  %mul52 = mul i16 %12, %1
+  %mul58 = mul i16 %9, %2
+  %mul64 = mul i16 %13, %3
+  %mul70 = mul i16 %10, %4
+  %mul76 = mul i16 %14, %5
+  %mul82 = mul i16 %11, %6
+  %mul88 = mul i16 %15, %7
+  %add53 = add i16 %mul58, %mul46
+  %add59 = add i16 %add53, %mul70
+  %add65 = add i16 %add59, %mul82
+  %add71 = add i16 %add65, %mul52
+  %add77 = add i16 %add71, %mul64
+  %add83 = add i16 %add77, %mul76
+  %add89 = add i16 %add83, %mul88
+  ret i16 %add89
+}
+
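+; reduce_blockstrided3: two blocks of three consecutive loads, x[0..2] and
+; x[stride..stride+2] (likewise for y), with two of the products (l13*l5,
+; l12*l4) accumulated in swapped order; the checks show everything scalar.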
+define i16 @reduce_blockstrided3(i16* nocapture noundef readonly %x, i16* nocapture noundef readonly %y, i32 noundef %stride) {
+; CHECK-LABEL: @reduce_blockstrided3(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[L0:%.*]] = load i16, i16* [[X:%.*]], align 2
+; CHECK-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 1
+; CHECK-NEXT:    [[L1:%.*]] = load i16, i16* [[ARRAYIDX1]], align 2
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 2
+; CHECK-NEXT:    [[L2:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2
+; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[STRIDE:%.*]] to i64
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM]]
+; CHECK-NEXT:    [[L4:%.*]] = load i16, i16* [[ARRAYIDX4]], align 2
+; CHECK-NEXT:    [[ADD5:%.*]] = add nsw i32 [[STRIDE]], 1
+; CHECK-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[ADD5]] to i64
+; CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM6]]
+; CHECK-NEXT:    [[L5:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
+; CHECK-NEXT:    [[ADD8:%.*]] = add nsw i32 [[STRIDE]], 2
+; CHECK-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[ADD8]] to i64
+; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM9]]
+; CHECK-NEXT:    [[L6:%.*]] = load i16, i16* [[ARRAYIDX10]], align 2
+; CHECK-NEXT:    [[L8:%.*]] = load i16, i16* [[Y:%.*]], align 2
+; CHECK-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 1
+; CHECK-NEXT:    [[L9:%.*]] = load i16, i16* [[ARRAYIDX15]], align 2
+; CHECK-NEXT:    [[ARRAYIDX16:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 2
+; CHECK-NEXT:    [[L10:%.*]] = load i16, i16* [[ARRAYIDX16]], align 2
+; CHECK-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM]]
+; CHECK-NEXT:    [[L12:%.*]] = load i16, i16* [[ARRAYIDX20]], align 2
+; CHECK-NEXT:    [[ARRAYIDX23:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM6]]
+; CHECK-NEXT:    [[L13:%.*]] = load i16, i16* [[ARRAYIDX23]], align 2
+; CHECK-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM9]]
+; CHECK-NEXT:    [[L14:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2
+; CHECK-NEXT:    [[MUL:%.*]] = mul i16 [[L8]], [[L0]]
+; CHECK-NEXT:    [[MUL36:%.*]] = mul i16 [[L9]], [[L1]]
+; CHECK-NEXT:    [[ADD37:%.*]] = add i16 [[MUL36]], [[MUL]]
+; CHECK-NEXT:    [[MUL48:%.*]] = mul i16 [[L10]], [[L2]]
+; CHECK-NEXT:    [[ADD49:%.*]] = add i16 [[ADD37]], [[MUL48]]
+; CHECK-NEXT:    [[MUL54:%.*]] = mul i16 [[L13]], [[L5]]
+; CHECK-NEXT:    [[ADD55:%.*]] = add i16 [[ADD49]], [[MUL54]]
+; CHECK-NEXT:    [[MUL60:%.*]] = mul i16 [[L12]], [[L4]]
+; CHECK-NEXT:    [[ADD61:%.*]] = add i16 [[ADD55]], [[MUL60]]
+; CHECK-NEXT:    [[MUL72:%.*]] = mul i16 [[L14]], [[L6]]
+; CHECK-NEXT:    [[ADD73:%.*]] = add i16 [[ADD61]], [[MUL72]]
+; CHECK-NEXT:    ret i16 [[ADD73]]
+;
+entry:
+  %l0 = load i16, i16* %x, align 2
+  %arrayidx1 = getelementptr inbounds i16, i16* %x, i64 1
+  %l1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx2 = getelementptr inbounds i16, i16* %x, i64 2
+  %l2 = load i16, i16* %arrayidx2, align 2
+  %idxprom = sext i32 %stride to i64
+  %arrayidx4 = getelementptr inbounds i16, i16* %x, i64 %idxprom
+  %l4 = load i16, i16* %arrayidx4, align 2
+  %add5 = add nsw i32 %stride, 1
+  %idxprom6 = sext i32 %add5 to i64
+  %arrayidx7 = getelementptr inbounds i16, i16* %x, i64 %idxprom6
+  %l5 = load i16, i16* %arrayidx7, align 2
+  %add8 = add nsw i32 %stride, 2
+  %idxprom9 = sext i32 %add8 to i64
+  %arrayidx10 = getelementptr inbounds i16, i16* %x, i64 %idxprom9
+  %l6 = load i16, i16* %arrayidx10, align 2
+  %add11 = add nsw i32 %stride, 3
+  %idxprom12 = sext i32 %add11 to i64
+  %l8 = load i16, i16* %y, align 2
+  %arrayidx15 = getelementptr inbounds i16, i16* %y, i64 1
+  %l9 = load i16, i16* %arrayidx15, align 2
+  %arrayidx16 = getelementptr inbounds i16, i16* %y, i64 2
+  %l10 = load i16, i16* %arrayidx16, align 2
+  %arrayidx20 = getelementptr inbounds i16, i16* %y, i64 %idxprom
+  %l12 = load i16, i16* %arrayidx20, align 2
+  %arrayidx23 = getelementptr inbounds i16, i16* %y, i64 %idxprom6
+  %l13 = load i16, i16* %arrayidx23, align 2
+  %arrayidx26 = getelementptr inbounds i16, i16* %y, i64 %idxprom9
+  %l14 = load i16, i16* %arrayidx26, align 2
+  %mul = mul i16 %l8, %l0
+  %mul36 = mul i16 %l9, %l1
+  %add37 = add i16 %mul36, %mul
+  %mul48 = mul i16 %l10, %l2
+  %add49 = add i16 %add37, %mul48
+  %mul54 = mul i16 %l13, %l5
+  %add55 = add i16 %add49, %mul54
+  %mul60 = mul i16 %l12, %l4
+  %add61 = add i16 %add55, %mul60
+  %mul72 = mul i16 %l14, %l6
+  %add73 = add i16 %add61, %mul72
+  ret i16 %add73
+}
+
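+; reduce_blockstrided4: blocks of four consecutive i16 loads at x[0..3] and
+; x[stride..stride+3] (likewise for y), reduced in a shuffled order; the
+; checks show a <4 x i16> load for each contiguous block plus scalar loads
+; for the strided blocks, stitched together with shuffles/insertelements
+; into an <8 x i16> multiply and @llvm.vector.reduce.add.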
+define i16 @reduce_blockstrided4(i16* nocapture noundef readonly %x, i16* nocapture noundef readonly %y, i32 noundef %stride) {
+; CHECK-LABEL: @reduce_blockstrided4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[STRIDE:%.*]] to i64
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i16, i16* [[X:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT:    [[ADD5:%.*]] = add nsw i32 [[STRIDE]], 1
+; CHECK-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[ADD5]] to i64
+; CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM6]]
+; CHECK-NEXT:    [[ADD8:%.*]] = add nsw i32 [[STRIDE]], 2
+; CHECK-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[ADD8]] to i64
+; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM9]]
+; CHECK-NEXT:    [[ADD11:%.*]] = add nsw i32 [[STRIDE]], 3
+; CHECK-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[ADD11]] to i64
+; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM12]]
+; CHECK-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[Y:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT:    [[ARRAYIDX23:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM6]]
+; CHECK-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM9]]
+; CHECK-NEXT:    [[ARRAYIDX29:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM12]]
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i16* [[X]] to <4 x i16>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 2
+; CHECK-NEXT:    [[TMP2:%.*]] = load i16, i16* [[ARRAYIDX4]], align 2
+; CHECK-NEXT:    [[TMP3:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
+; CHECK-NEXT:    [[TMP4:%.*]] = load i16, i16* [[ARRAYIDX10]], align 2
+; CHECK-NEXT:    [[TMP5:%.*]] = load i16, i16* [[ARRAYIDX13]], align 2
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[Y]] to <4 x i16>*
+; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x i16>, <4 x i16>* [[TMP6]], align 2
+; CHECK-NEXT:    [[TMP8:%.*]] = load i16, i16* [[ARRAYIDX20]], align 2
+; CHECK-NEXT:    [[TMP9:%.*]] = load i16, i16* [[ARRAYIDX23]], align 2
+; CHECK-NEXT:    [[TMP10:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2
+; CHECK-NEXT:    [[TMP11:%.*]] = load i16, i16* [[ARRAYIDX29]], align 2
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x i16> [[TMP7]], <4 x i16> poison, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP13:%.*]] = insertelement <8 x i16> [[TMP12]], i16 [[TMP9]], i64 4
+; CHECK-NEXT:    [[TMP14:%.*]] = insertelement <8 x i16> [[TMP13]], i16 [[TMP8]], i64 5
+; CHECK-NEXT:    [[TMP15:%.*]] = insertelement <8 x i16> [[TMP14]], i16 [[TMP11]], i64 6
+; CHECK-NEXT:    [[TMP16:%.*]] = insertelement <8 x i16> [[TMP15]], i16 [[TMP10]], i64 7
+; CHECK-NEXT:    [[TMP17:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> poison, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP18:%.*]] = insertelement <8 x i16> [[TMP17]], i16 [[TMP3]], i64 4
+; CHECK-NEXT:    [[TMP19:%.*]] = insertelement <8 x i16> [[TMP18]], i16 [[TMP2]], i64 5
+; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <8 x i16> [[TMP19]], i16 [[TMP5]], i64 6
+; CHECK-NEXT:    [[TMP21:%.*]] = insertelement <8 x i16> [[TMP20]], i16 [[TMP4]], i64 7
+; CHECK-NEXT:    [[TMP22:%.*]] = mul <8 x i16> [[TMP16]], [[TMP21]]
+; CHECK-NEXT:    [[TMP23:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[TMP22]])
+; CHECK-NEXT:    ret i16 [[TMP23]]
+;
+entry:
+  %0 = load i16, i16* %x, align 2
+  %arrayidx1 = getelementptr inbounds i16, i16* %x, i64 1
+  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx2 = getelementptr inbounds i16, i16* %x, i64 2
+  %2 = load i16, i16* %arrayidx2, align 2
+  %arrayidx3 = getelementptr inbounds i16, i16* %x, i64 3
+  %3 = load i16, i16* %arrayidx3, align 2
+  %idxprom = sext i32 %stride to i64
+  %arrayidx4 = getelementptr inbounds i16, i16* %x, i64 %idxprom
+  %4 = load i16, i16* %arrayidx4, align 2
+  %add5 = add nsw i32 %stride, 1
+  %idxprom6 = sext i32 %add5 to i64
+  %arrayidx7 = getelementptr inbounds i16, i16* %x, i64 %idxprom6
+  %5 = load i16, i16* %arrayidx7, align 2
+  %add8 = add nsw i32 %stride, 2
+  %idxprom9 = sext i32 %add8 to i64
+  %arrayidx10 = getelementptr inbounds i16, i16* %x, i64 %idxprom9
+  %6 = load i16, i16* %arrayidx10, align 2
+  %add11 = add nsw i32 %stride, 3
+  %idxprom12 = sext i32 %add11 to i64
+  %arrayidx13 = getelementptr inbounds i16, i16* %x, i64 %idxprom12
+  %7 = load i16, i16* %arrayidx13, align 2
+  %8 = load i16, i16* %y, align 2
+  %arrayidx15 = getelementptr inbounds i16, i16* %y, i64 1
+  %9 = load i16, i16* %arrayidx15, align 2
+  %arrayidx16 = getelementptr inbounds i16, i16* %y, i64 2
+  %10 = load i16, i16* %arrayidx16, align 2
+  %arrayidx17 = getelementptr inbounds i16, i16* %y, i64 3
+  %11 = load i16, i16* %arrayidx17, align 2
+  %arrayidx20 = getelementptr inbounds i16, i16* %y, i64 %idxprom
+  %12 = load i16, i16* %arrayidx20, align 2
+  %arrayidx23 = getelementptr inbounds i16, i16* %y, i64 %idxprom6
+  %13 = load i16, i16* %arrayidx23, align 2
+  %arrayidx26 = getelementptr inbounds i16, i16* %y, i64 %idxprom9
+  %14 = load i16, i16* %arrayidx26, align 2
+  %arrayidx29 = getelementptr inbounds i16, i16* %y, i64 %idxprom12
+  %15 = load i16, i16* %arrayidx29, align 2
+  %mul = mul i16 %8, %0
+  %mul36 = mul i16 %9, %1
+  %add37 = add i16 %mul36, %mul
+  %mul42 = mul i16 %11, %3
+  %add43 = add i16 %add37, %mul42
+  %mul48 = mul i16 %10, %2
+  %add49 = add i16 %add43, %mul48
+  %mul54 = mul i16 %13, %5
+  %add55 = add i16 %add49, %mul54
+  %mul60 = mul i16 %12, %4
+  %add61 = add i16 %add55, %mul60
+  %mul66 = mul i16 %15, %7
+  %add67 = add i16 %add61, %mul66
+  %mul72 = mul i16 %14, %6
+  %add73 = add i16 %add67, %mul72
+  ret i16 %add73
+}
+
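+; reduce_blockstrided4x4: rows of i8 data at p1/p2 advanced by off1/off2,
+; each row read as two blocks of four consecutive bytes (offsets 0..3 and
+; 4..7); each byte is zero-extended and multiplied by the byte four lanes
+; later in the same row. Only the first two rows feed the final sum, and
+; the checks show the <4 x i8> loads widened through long
+; shufflevector/insertelement chains into a <16 x i32> multiply and add
+; reduction.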
+define i32 @reduce_blockstrided4x4(i8* nocapture noundef readonly %p1, i32 noundef %off1, i8* nocapture noundef readonly %p2, i32 noundef %off2) {
+; CHECK-LABEL: @reduce_blockstrided4x4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[IDX_EXT:%.*]] = sext i32 [[OFF1:%.*]] to i64
+; CHECK-NEXT:    [[IDX_EXT63:%.*]] = sext i32 [[OFF2:%.*]] to i64
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, i8* [[P1:%.*]], i64 4
+; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i8, i8* [[P2:%.*]], i64 4
+; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i8, i8* [[P2]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds i8, i8* [[P2]], i64 5
+; CHECK-NEXT:    [[ARRAYIDX22:%.*]] = getelementptr inbounds i8, i8* [[P2]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds i8, i8* [[P2]], i64 6
+; CHECK-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds i8, i8* [[P2]], i64 3
+; CHECK-NEXT:    [[ARRAYIDX39:%.*]] = getelementptr inbounds i8, i8* [[P2]], i64 7
+; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[P1]], i64 [[IDX_EXT]]
+; CHECK-NEXT:    [[ADD_PTR64:%.*]] = getelementptr inbounds i8, i8* [[P2]], i64 [[IDX_EXT63]]
+; CHECK-NEXT:    [[ARRAYIDX3_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR]], i64 4
+; CHECK-NEXT:    [[ARRAYIDX5_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64]], i64 4
+; CHECK-NEXT:    [[ARRAYIDX10_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64]], i64 1
+; CHECK-NEXT:    [[ARRAYIDX15_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64]], i64 5
+; CHECK-NEXT:    [[ARRAYIDX22_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64]], i64 2
+; CHECK-NEXT:    [[ARRAYIDX27_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64]], i64 6
+; CHECK-NEXT:    [[ARRAYIDX34_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64]], i64 3
+; CHECK-NEXT:    [[ARRAYIDX39_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64]], i64 7
+; CHECK-NEXT:    [[TMP0:%.*]] = load i8, i8* [[P2]], align 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load i8, i8* [[ARRAYIDX5]], align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX10]], align 1
+; CHECK-NEXT:    [[TMP3:%.*]] = load i8, i8* [[ARRAYIDX15]], align 1
+; CHECK-NEXT:    [[TMP4:%.*]] = load i8, i8* [[ARRAYIDX22]], align 1
+; CHECK-NEXT:    [[TMP5:%.*]] = load i8, i8* [[ARRAYIDX27]], align 1
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i8* [[P1]] to <4 x i8>*
+; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x i8>, <4 x i8>* [[TMP6]], align 1
+; CHECK-NEXT:    [[TMP8:%.*]] = load i8, i8* [[ARRAYIDX34]], align 1
+; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[ARRAYIDX3]] to <4 x i8>*
+; CHECK-NEXT:    [[TMP10:%.*]] = load <4 x i8>, <4 x i8>* [[TMP9]], align 1
+; CHECK-NEXT:    [[TMP11:%.*]] = load i8, i8* [[ARRAYIDX39]], align 1
+; CHECK-NEXT:    [[TMP12:%.*]] = load i8, i8* [[ADD_PTR64]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = load i8, i8* [[ARRAYIDX5_1]], align 1
+; CHECK-NEXT:    [[TMP14:%.*]] = load i8, i8* [[ARRAYIDX10_1]], align 1
+; CHECK-NEXT:    [[TMP15:%.*]] = load i8, i8* [[ARRAYIDX15_1]], align 1
+; CHECK-NEXT:    [[TMP16:%.*]] = load i8, i8* [[ARRAYIDX22_1]], align 1
+; CHECK-NEXT:    [[TMP17:%.*]] = load i8, i8* [[ARRAYIDX27_1]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = bitcast i8* [[ADD_PTR]] to <4 x i8>*
+; CHECK-NEXT:    [[TMP19:%.*]] = load <4 x i8>, <4 x i8>* [[TMP18]], align 1
+; CHECK-NEXT:    [[TMP20:%.*]] = load i8, i8* [[ARRAYIDX34_1]], align 1
+; CHECK-NEXT:    [[TMP21:%.*]] = shufflevector <4 x i8> [[TMP7]], <4 x i8> poison, <16 x i32> <i32 1, i32 0, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP22:%.*]] = insertelement <16 x i8> [[TMP21]], i8 [[TMP8]], i64 4
+; CHECK-NEXT:    [[TMP23:%.*]] = insertelement <16 x i8> [[TMP22]], i8 [[TMP4]], i64 5
+; CHECK-NEXT:    [[TMP24:%.*]] = insertelement <16 x i8> [[TMP23]], i8 [[TMP2]], i64 6
+; CHECK-NEXT:    [[TMP25:%.*]] = insertelement <16 x i8> [[TMP24]], i8 [[TMP0]], i64 7
+; CHECK-NEXT:    [[TMP26:%.*]] = shufflevector <4 x i8> [[TMP19]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP27:%.*]] = shufflevector <16 x i8> [[TMP25]], <16 x i8> [[TMP26]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP28:%.*]] = insertelement <16 x i8> [[TMP27]], i8 [[TMP20]], i64 12
+; CHECK-NEXT:    [[TMP29:%.*]] = insertelement <16 x i8> [[TMP28]], i8 [[TMP16]], i64 13
+; CHECK-NEXT:    [[TMP30:%.*]] = insertelement <16 x i8> [[TMP29]], i8 [[TMP14]], i64 14
+; CHECK-NEXT:    [[TMP31:%.*]] = insertelement <16 x i8> [[TMP30]], i8 [[TMP12]], i64 15
+; CHECK-NEXT:    [[TMP32:%.*]] = zext <16 x i8> [[TMP31]] to <16 x i32>
+; CHECK-NEXT:    [[TMP33:%.*]] = bitcast i8* [[ARRAYIDX3_1]] to <4 x i8>*
+; CHECK-NEXT:    [[TMP34:%.*]] = load <4 x i8>, <4 x i8>* [[TMP33]], align 1
+; CHECK-NEXT:    [[TMP35:%.*]] = load i8, i8* [[ARRAYIDX39_1]], align 1
+; CHECK-NEXT:    [[TMP36:%.*]] = shufflevector <4 x i8> [[TMP10]], <4 x i8> poison, <16 x i32> <i32 1, i32 0, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP37:%.*]] = insertelement <16 x i8> [[TMP36]], i8 [[TMP11]], i64 4
+; CHECK-NEXT:    [[TMP38:%.*]] = insertelement <16 x i8> [[TMP37]], i8 [[TMP5]], i64 5
+; CHECK-NEXT:    [[TMP39:%.*]] = insertelement <16 x i8> [[TMP38]], i8 [[TMP3]], i64 6
+; CHECK-NEXT:    [[TMP40:%.*]] = insertelement <16 x i8> [[TMP39]], i8 [[TMP1]], i64 7
+; CHECK-NEXT:    [[TMP41:%.*]] = shufflevector <4 x i8> [[TMP34]], <4 x i8> poison, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP42:%.*]] = shufflevector <16 x i8> [[TMP40]], <16 x i8> [[TMP41]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP43:%.*]] = insertelement <16 x i8> [[TMP42]], i8 [[TMP35]], i64 12
+; CHECK-NEXT:    [[TMP44:%.*]] = insertelement <16 x i8> [[TMP43]], i8 [[TMP17]], i64 13
+; CHECK-NEXT:    [[TMP45:%.*]] = insertelement <16 x i8> [[TMP44]], i8 [[TMP15]], i64 14
+; CHECK-NEXT:    [[TMP46:%.*]] = insertelement <16 x i8> [[TMP45]], i8 [[TMP13]], i64 15
+; CHECK-NEXT:    [[TMP47:%.*]] = zext <16 x i8> [[TMP46]] to <16 x i32>
+; CHECK-NEXT:    [[TMP48:%.*]] = mul nuw nsw <16 x i32> [[TMP32]], [[TMP47]]
+; CHECK-NEXT:    [[TMP49:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP48]])
+; CHECK-NEXT:    ret i32 [[TMP49]]
+;
+entry:
+  %idx.ext = sext i32 %off1 to i64
+  %idx.ext63 = sext i32 %off2 to i64
+
+  %0 = load i8, i8* %p1, align 1
+  %conv = zext i8 %0 to i32
+  %1 = load i8, i8* %p2, align 1
+  %conv2 = zext i8 %1 to i32
+  %arrayidx3 = getelementptr inbounds i8, i8* %p1, i64 4
+  %2 = load i8, i8* %arrayidx3, align 1
+  %conv4 = zext i8 %2 to i32
+  %arrayidx5 = getelementptr inbounds i8, i8* %p2, i64 4
+  %3 = load i8, i8* %arrayidx5, align 1
+  %conv6 = zext i8 %3 to i32
+  %arrayidx8 = getelementptr inbounds i8, i8* %p1, i64 1
+  %4 = load i8, i8* %arrayidx8, align 1
+  %conv9 = zext i8 %4 to i32
+  %arrayidx10 = getelementptr inbounds i8, i8* %p2, i64 1
+  %5 = load i8, i8* %arrayidx10, align 1
+  %conv11 = zext i8 %5 to i32
+  %arrayidx13 = getelementptr inbounds i8, i8* %p1, i64 5
+  %6 = load i8, i8* %arrayidx13, align 1
+  %conv14 = zext i8 %6 to i32
+  %arrayidx15 = getelementptr inbounds i8, i8* %p2, i64 5
+  %7 = load i8, i8* %arrayidx15, align 1
+  %conv16 = zext i8 %7 to i32
+  %arrayidx20 = getelementptr inbounds i8, i8* %p1, i64 2
+  %8 = load i8, i8* %arrayidx20, align 1
+  %conv21 = zext i8 %8 to i32
+  %arrayidx22 = getelementptr inbounds i8, i8* %p2, i64 2
+  %9 = load i8, i8* %arrayidx22, align 1
+  %conv23 = zext i8 %9 to i32
+  %arrayidx25 = getelementptr inbounds i8, i8* %p1, i64 6
+  %10 = load i8, i8* %arrayidx25, align 1
+  %conv26 = zext i8 %10 to i32
+  %arrayidx27 = getelementptr inbounds i8, i8* %p2, i64 6
+  %11 = load i8, i8* %arrayidx27, align 1
+  %conv28 = zext i8 %11 to i32
+  %arrayidx32 = getelementptr inbounds i8, i8* %p1, i64 3
+  %12 = load i8, i8* %arrayidx32, align 1
+  %conv33 = zext i8 %12 to i32
+  %arrayidx34 = getelementptr inbounds i8, i8* %p2, i64 3
+  %13 = load i8, i8* %arrayidx34, align 1
+  %conv35 = zext i8 %13 to i32
+  %arrayidx37 = getelementptr inbounds i8, i8* %p1, i64 7
+  %14 = load i8, i8* %arrayidx37, align 1
+  %conv38 = zext i8 %14 to i32
+  %arrayidx39 = getelementptr inbounds i8, i8* %p2, i64 7
+  %15 = load i8, i8* %arrayidx39, align 1
+  %conv40 = zext i8 %15 to i32
+  %add.ptr = getelementptr inbounds i8, i8* %p1, i64 %idx.ext
+  %16 = load i8, i8* %add.ptr, align 1
+  %conv.1 = zext i8 %16 to i32
+  %add.ptr64 = getelementptr inbounds i8, i8* %p2, i64 %idx.ext63
+  %17 = load i8, i8* %add.ptr64, align 1
+  %conv2.1 = zext i8 %17 to i32
+  %arrayidx3.1 = getelementptr inbounds i8, i8* %add.ptr, i64 4
+  %18 = load i8, i8* %arrayidx3.1, align 1
+  %conv4.1 = zext i8 %18 to i32
+  %arrayidx5.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 4
+  %19 = load i8, i8* %arrayidx5.1, align 1
+  %conv6.1 = zext i8 %19 to i32
+  %arrayidx8.1 = getelementptr inbounds i8, i8* %add.ptr, i64 1
+  %20 = load i8, i8* %arrayidx8.1, align 1
+  %conv9.1 = zext i8 %20 to i32
+  %arrayidx10.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 1
+  %21 = load i8, i8* %arrayidx10.1, align 1
+  %conv11.1 = zext i8 %21 to i32
+  %arrayidx13.1 = getelementptr inbounds i8, i8* %add.ptr, i64 5
+  %22 = load i8, i8* %arrayidx13.1, align 1
+  %conv14.1 = zext i8 %22 to i32
+  %arrayidx15.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 5
+  %23 = load i8, i8* %arrayidx15.1, align 1
+  %conv16.1 = zext i8 %23 to i32
+  %arrayidx20.1 = getelementptr inbounds i8, i8* %add.ptr, i64 2
+  %24 = load i8, i8* %arrayidx20.1, align 1
+  %conv21.1 = zext i8 %24 to i32
+  %arrayidx22.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 2
+  %25 = load i8, i8* %arrayidx22.1, align 1
+  %conv23.1 = zext i8 %25 to i32
+  %arrayidx25.1 = getelementptr inbounds i8, i8* %add.ptr, i64 6
+  %26 = load i8, i8* %arrayidx25.1, align 1
+  %conv26.1 = zext i8 %26 to i32
+  %arrayidx27.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 6
+  %27 = load i8, i8* %arrayidx27.1, align 1
+  %conv28.1 = zext i8 %27 to i32
+  %arrayidx32.1 = getelementptr inbounds i8, i8* %add.ptr, i64 3
+  %28 = load i8, i8* %arrayidx32.1, align 1
+  %conv33.1 = zext i8 %28 to i32
+  %arrayidx34.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 3
+  %29 = load i8, i8* %arrayidx34.1, align 1
+  %conv35.1 = zext i8 %29 to i32
+  %arrayidx37.1 = getelementptr inbounds i8, i8* %add.ptr, i64 7
+  %30 = load i8, i8* %arrayidx37.1, align 1
+  %conv38.1 = zext i8 %30 to i32
+  %arrayidx39.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 7
+  %31 = load i8, i8* %arrayidx39.1, align 1
+  %conv40.1 = zext i8 %31 to i32
+  %add.ptr.1 = getelementptr inbounds i8, i8* %add.ptr, i64 %idx.ext
+  %32 = load i8, i8* %add.ptr.1, align 1
+  %conv.2 = zext i8 %32 to i32
+  %add.ptr64.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 %idx.ext63
+  %33 = load i8, i8* %add.ptr64.1, align 1
+  %conv2.2 = zext i8 %33 to i32
+  %arrayidx3.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 4
+  %34 = load i8, i8* %arrayidx3.2, align 1
+  %conv4.2 = zext i8 %34 to i32
+  %arrayidx5.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 4
+  %35 = load i8, i8* %arrayidx5.2, align 1
+  %conv6.2 = zext i8 %35 to i32
+  %arrayidx8.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 1
+  %36 = load i8, i8* %arrayidx8.2, align 1
+  %conv9.2 = zext i8 %36 to i32
+  %arrayidx10.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 1
+  %37 = load i8, i8* %arrayidx10.2, align 1
+  %conv11.2 = zext i8 %37 to i32
+  %arrayidx13.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 5
+  %38 = load i8, i8* %arrayidx13.2, align 1
+  %conv14.2 = zext i8 %38 to i32
+  %arrayidx15.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 5
+  %39 = load i8, i8* %arrayidx15.2, align 1
+  %conv16.2 = zext i8 %39 to i32
+  %arrayidx20.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 2
+  %40 = load i8, i8* %arrayidx20.2, align 1
+  %conv21.2 = zext i8 %40 to i32
+  %arrayidx22.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 2
+  %41 = load i8, i8* %arrayidx22.2, align 1
+  %conv23.2 = zext i8 %41 to i32
+  %arrayidx25.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 6
+  %42 = load i8, i8* %arrayidx25.2, align 1
+  %conv26.2 = zext i8 %42 to i32
+  %arrayidx27.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 6
+  %43 = load i8, i8* %arrayidx27.2, align 1
+  %conv28.2 = zext i8 %43 to i32
+  %arrayidx32.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 3
+  %44 = load i8, i8* %arrayidx32.2, align 1
+  %conv33.2 = zext i8 %44 to i32
+  %arrayidx34.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 3
+  %45 = load i8, i8* %arrayidx34.2, align 1
+  %conv35.2 = zext i8 %45 to i32
+  %arrayidx37.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 7
+  %46 = load i8, i8* %arrayidx37.2, align 1
+  %conv38.2 = zext i8 %46 to i32
+  %arrayidx39.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 7
+  %47 = load i8, i8* %arrayidx39.2, align 1
+  %conv40.2 = zext i8 %47 to i32
+  %add.ptr.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 %idx.ext
+  %48 = load i8, i8* %add.ptr.2, align 1
+  %conv.3 = zext i8 %48 to i32
+  %add.ptr64.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 %idx.ext63
+  %49 = load i8, i8* %add.ptr64.2, align 1
+  %conv2.3 = zext i8 %49 to i32
+  %arrayidx3.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 4
+  %50 = load i8, i8* %arrayidx3.3, align 1
+  %conv4.3 = zext i8 %50 to i32
+  %arrayidx5.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 4
+  %51 = load i8, i8* %arrayidx5.3, align 1
+  %conv6.3 = zext i8 %51 to i32
+  %arrayidx8.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 1
+  %52 = load i8, i8* %arrayidx8.3, align 1
+  %conv9.3 = zext i8 %52 to i32
+  %arrayidx10.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 1
+  %53 = load i8, i8* %arrayidx10.3, align 1
+  %conv11.3 = zext i8 %53 to i32
+  %arrayidx13.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 5
+  %54 = load i8, i8* %arrayidx13.3, align 1
+  %conv14.3 = zext i8 %54 to i32
+  %arrayidx15.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 5
+  %55 = load i8, i8* %arrayidx15.3, align 1
+  %conv16.3 = zext i8 %55 to i32
+  %arrayidx20.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 2
+  %56 = load i8, i8* %arrayidx20.3, align 1
+  %conv21.3 = zext i8 %56 to i32
+  %arrayidx22.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 2
+  %57 = load i8, i8* %arrayidx22.3, align 1
+  %conv23.3 = zext i8 %57 to i32
+  %arrayidx25.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 6
+  %58 = load i8, i8* %arrayidx25.3, align 1
+  %conv26.3 = zext i8 %58 to i32
+  %arrayidx27.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 6
+  %59 = load i8, i8* %arrayidx27.3, align 1
+  %conv28.3 = zext i8 %59 to i32
+  %arrayidx32.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 3
+  %60 = load i8, i8* %arrayidx32.3, align 1
+  %conv33.3 = zext i8 %60 to i32
+  %arrayidx34.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 3
+  %61 = load i8, i8* %arrayidx34.3, align 1
+  %conv35.3 = zext i8 %61 to i32
+  %arrayidx37.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 7
+  %62 = load i8, i8* %arrayidx37.3, align 1
+  %conv38.3 = zext i8 %62 to i32
+  %arrayidx39.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 7
+  %63 = load i8, i8* %arrayidx39.3, align 1
+  %conv40.3 = zext i8 %63 to i32
+
+  %m1 = mul i32 %conv, %conv4
+  %m2 = mul i32 %conv9, %conv14
+  %m3 = mul i32 %conv21, %conv26
+  %m4 = mul i32 %conv33, %conv38
+  %m8 = mul i32 %conv2, %conv6
+  %m7 = mul i32 %conv11, %conv16
+  %m6 = mul i32 %conv23, %conv28
+  %m5 = mul i32 %conv35, %conv40
+  %m9 = mul i32 %conv.1, %conv4.1
+  %m10 = mul i32 %conv9.1, %conv14.1
+  %m11 = mul i32 %conv21.1, %conv26.1
+  %m12 = mul i32 %conv33.1, %conv38.1
+  %m16 = mul i32 %conv2.1, %conv6.1
+  %m15 = mul i32 %conv11.1, %conv16.1
+  %m14 = mul i32 %conv23.1, %conv28.1
+  %m13 = mul i32 %conv35.1, %conv40.1
+
+  %a2 = add i32 %m1, %m2
+  %a3 = add i32 %a2, %m3
+  %a4 = add i32 %a3, %m4
+  %a5 = add i32 %a4, %m5
+  %a6 = add i32 %a5, %m6
+  %a7 = add i32 %a6, %m7
+  %a8 = add i32 %a7, %m8
+  %a9 = add i32 %a8, %m9
+  %a10 = add i32 %a9, %m10
+  %a11 = add i32 %a10, %m11
+  %a12 = add i32 %a11, %m12
+  %a13 = add i32 %a12, %m13
+  %a14 = add i32 %a13, %m14
+  %a15 = add i32 %a14, %m15
+  %a16 = add i32 %a15, %m16
+  ret i32 %a16
+}
+
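+; store_blockstrided3: blocks of (mostly) three consecutive i32 loads from
+; x and y at stride offsets, with each triple of products stored to z in
+; reversed order (and z[5] never written); the checks show a mix of
+; <2 x i32> loads, <4 x i32> stores and remaining scalar operations.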
+define void @store_blockstrided3(i32* nocapture noundef readonly %x, i32* nocapture noundef readonly %y, i32* nocapture noundef writeonly %z, i32 noundef %stride) {
+; CHECK-LABEL: @store_blockstrided3(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[X:%.*]], i64 2
+; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; CHECK-NEXT:    [[ADD4:%.*]] = add nsw i32 [[STRIDE:%.*]], 1
+; CHECK-NEXT:    [[IDXPROM5:%.*]] = sext i32 [[ADD4]] to i64
+; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 [[IDXPROM5]]
+; CHECK-NEXT:    [[MUL:%.*]] = shl nsw i32 [[STRIDE]], 1
+; CHECK-NEXT:    [[IDXPROM11:%.*]] = sext i32 [[MUL]] to i64
+; CHECK-NEXT:    [[ARRAYIDX12:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 [[IDXPROM11]]
+; CHECK-NEXT:    [[ADD18:%.*]] = add nsw i32 [[MUL]], 2
+; CHECK-NEXT:    [[IDXPROM19:%.*]] = sext i32 [[ADD18]] to i64
+; CHECK-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 [[IDXPROM19]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[ARRAYIDX20]], align 4
+; CHECK-NEXT:    [[MUL21:%.*]] = mul nsw i32 [[STRIDE]], 3
+; CHECK-NEXT:    [[IDXPROM23:%.*]] = sext i32 [[MUL21]] to i64
+; CHECK-NEXT:    [[ARRAYIDX24:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 [[IDXPROM23]]
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX24]], align 4
+; CHECK-NEXT:    [[ADD26:%.*]] = add nsw i32 [[MUL21]], 1
+; CHECK-NEXT:    [[IDXPROM27:%.*]] = sext i32 [[ADD26]] to i64
+; CHECK-NEXT:    [[ARRAYIDX28:%.*]] = getelementptr inbounds i32, i32* [[X]], i64 [[IDXPROM27]]
+; CHECK-NEXT:    [[ARRAYIDX35:%.*]] = getelementptr inbounds i32, i32* [[Y:%.*]], i64 2
+; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX35]], align 4
+; CHECK-NEXT:    [[ARRAYIDX41:%.*]] = getelementptr inbounds i32, i32* [[Y]], i64 [[IDXPROM5]]
+; CHECK-NEXT:    [[ARRAYIDX48:%.*]] = getelementptr inbounds i32, i32* [[Y]], i64 [[IDXPROM11]]
+; CHECK-NEXT:    [[ARRAYIDX56:%.*]] = getelementptr inbounds i32, i32* [[Y]], i64 [[IDXPROM19]]
+; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX56]], align 4
+; CHECK-NEXT:    [[ARRAYIDX60:%.*]] = getelementptr inbounds i32, i32* [[Y]], i64 [[IDXPROM23]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX60]], align 4
+; CHECK-NEXT:    [[ARRAYIDX64:%.*]] = getelementptr inbounds i32, i32* [[Y]], i64 [[IDXPROM27]]
+; CHECK-NEXT:    [[ARRAYIDX72:%.*]] = getelementptr inbounds i32, i32* [[Z:%.*]], i64 1
+; CHECK-NEXT:    [[MUL73:%.*]] = mul nsw i32 [[TMP3]], [[TMP0]]
+; CHECK-NEXT:    [[ARRAYIDX76:%.*]] = getelementptr inbounds i32, i32* [[Z]], i64 6
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i32* [[X]] to <2 x i32>*
+; CHECK-NEXT:    [[TMP7:%.*]] = load <2 x i32>, <2 x i32>* [[TMP6]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i32* [[ARRAYIDX6]] to <2 x i32>*
+; CHECK-NEXT:    [[TMP9:%.*]] = load <2 x i32>, <2 x i32>* [[TMP8]], align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = bitcast i32* [[Y]] to <2 x i32>*
+; CHECK-NEXT:    [[TMP11:%.*]] = load <2 x i32>, <2 x i32>* [[TMP10]], align 4
+; CHECK-NEXT:    [[TMP12:%.*]] = bitcast i32* [[ARRAYIDX41]] to <2 x i32>*
+; CHECK-NEXT:    [[TMP13:%.*]] = load <2 x i32>, <2 x i32>* [[TMP12]], align 4
+; CHECK-NEXT:    [[TMP14:%.*]] = shufflevector <2 x i32> [[TMP11]], <2 x i32> poison, <4 x i32> <i32 1, i32 0, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP15:%.*]] = shufflevector <2 x i32> [[TMP13]], <2 x i32> poison, <4 x i32> <i32 1, i32 0, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP16:%.*]] = shufflevector <4 x i32> [[TMP14]], <4 x i32> [[TMP15]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT:    [[TMP17:%.*]] = shufflevector <2 x i32> [[TMP7]], <2 x i32> poison, <4 x i32> <i32 1, i32 0, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP18:%.*]] = shufflevector <2 x i32> [[TMP9]], <2 x i32> poison, <4 x i32> <i32 1, i32 0, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP19:%.*]] = shufflevector <4 x i32> [[TMP17]], <4 x i32> [[TMP18]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT:    [[TMP20:%.*]] = mul nsw <4 x i32> [[TMP16]], [[TMP19]]
+; CHECK-NEXT:    [[TMP21:%.*]] = bitcast i32* [[ARRAYIDX72]] to <4 x i32>*
+; CHECK-NEXT:    [[ARRAYIDX84:%.*]] = getelementptr inbounds i32, i32* [[Z]], i64 7
+; CHECK-NEXT:    [[MUL85:%.*]] = mul nsw i32 [[TMP4]], [[TMP1]]
+; CHECK-NEXT:    [[MUL87:%.*]] = mul nsw i32 [[TMP5]], [[TMP2]]
+; CHECK-NEXT:    [[ARRAYIDX88:%.*]] = getelementptr inbounds i32, i32* [[Z]], i64 11
+; CHECK-NEXT:    [[TMP22:%.*]] = bitcast i32* [[ARRAYIDX12]] to <2 x i32>*
+; CHECK-NEXT:    [[TMP23:%.*]] = load <2 x i32>, <2 x i32>* [[TMP22]], align 4
+; CHECK-NEXT:    [[TMP24:%.*]] = bitcast i32* [[ARRAYIDX28]] to <2 x i32>*
+; CHECK-NEXT:    [[TMP25:%.*]] = load <2 x i32>, <2 x i32>* [[TMP24]], align 4
+; CHECK-NEXT:    [[TMP26:%.*]] = bitcast i32* [[ARRAYIDX48]] to <2 x i32>*
+; CHECK-NEXT:    [[TMP27:%.*]] = load <2 x i32>, <2 x i32>* [[TMP26]], align 4
+; CHECK-NEXT:    [[TMP28:%.*]] = bitcast i32* [[ARRAYIDX64]] to <2 x i32>*
+; CHECK-NEXT:    [[TMP29:%.*]] = load <2 x i32>, <2 x i32>* [[TMP28]], align 4
+; CHECK-NEXT:    store i32 [[MUL73]], i32* [[Z]], align 4
+; CHECK-NEXT:    store <4 x i32> [[TMP20]], <4 x i32>* [[TMP21]], align 4
+; CHECK-NEXT:    store i32 [[MUL85]], i32* [[ARRAYIDX76]], align 4
+; CHECK-NEXT:    store i32 [[MUL87]], i32* [[ARRAYIDX88]], align 4
+; CHECK-NEXT:    [[TMP30:%.*]] = shufflevector <2 x i32> [[TMP27]], <2 x i32> poison, <4 x i32> <i32 1, i32 0, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP31:%.*]] = shufflevector <2 x i32> [[TMP29]], <2 x i32> poison, <4 x i32> <i32 1, i32 0, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP32:%.*]] = shufflevector <4 x i32> [[TMP30]], <4 x i32> [[TMP31]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT:    [[TMP33:%.*]] = shufflevector <2 x i32> [[TMP23]], <2 x i32> poison, <4 x i32> <i32 1, i32 0, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP34:%.*]] = shufflevector <2 x i32> [[TMP25]], <2 x i32> poison, <4 x i32> <i32 1, i32 0, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP35:%.*]] = shufflevector <4 x i32> [[TMP33]], <4 x i32> [[TMP34]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT:    [[TMP36:%.*]] = mul nsw <4 x i32> [[TMP32]], [[TMP35]]
+; CHECK-NEXT:    [[TMP37:%.*]] = bitcast i32* [[ARRAYIDX84]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP36]], <4 x i32>* [[TMP37]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = load i32, i32* %x, align 4
+  %arrayidx1 = getelementptr inbounds i32, i32* %x, i64 1
+  %1 = load i32, i32* %arrayidx1, align 4
+  %arrayidx2 = getelementptr inbounds i32, i32* %x, i64 2
+  %2 = load i32, i32* %arrayidx2, align 4
+  %add4 = add nsw i32 %stride, 1
+  %idxprom5 = sext i32 %add4 to i64
+  %arrayidx6 = getelementptr inbounds i32, i32* %x, i64 %idxprom5
+  %3 = load i32, i32* %arrayidx6, align 4
+  %add7 = add nsw i32 %stride, 2
+  %idxprom8 = sext i32 %add7 to i64
+  %arrayidx9 = getelementptr inbounds i32, i32* %x, i64 %idxprom8
+  %4 = load i32, i32* %arrayidx9, align 4
+  %mul = shl nsw i32 %stride, 1
+  %idxprom11 = sext i32 %mul to i64
+  %arrayidx12 = getelementptr inbounds i32, i32* %x, i64 %idxprom11
+  %5 = load i32, i32* %arrayidx12, align 4
+  %add14 = or i32 %mul, 1
+  %idxprom15 = sext i32 %add14 to i64
+  %arrayidx16 = getelementptr inbounds i32, i32* %x, i64 %idxprom15
+  %6 = load i32, i32* %arrayidx16, align 4
+  %add18 = add nsw i32 %mul, 2
+  %idxprom19 = sext i32 %add18 to i64
+  %arrayidx20 = getelementptr inbounds i32, i32* %x, i64 %idxprom19
+  %7 = load i32, i32* %arrayidx20, align 4
+  %mul21 = mul nsw i32 %stride, 3
+  %idxprom23 = sext i32 %mul21 to i64
+  %arrayidx24 = getelementptr inbounds i32, i32* %x, i64 %idxprom23
+  %8 = load i32, i32* %arrayidx24, align 4
+  %add26 = add nsw i32 %mul21, 1
+  %idxprom27 = sext i32 %add26 to i64
+  %arrayidx28 = getelementptr inbounds i32, i32* %x, i64 %idxprom27
+  %9 = load i32, i32* %arrayidx28, align 4
+  %add30 = add nsw i32 %mul21, 2
+  %idxprom31 = sext i32 %add30 to i64
+  %arrayidx32 = getelementptr inbounds i32, i32* %x, i64 %idxprom31
+  %10 = load i32, i32* %arrayidx32, align 4
+  %11 = load i32, i32* %y, align 4
+  %arrayidx34 = getelementptr inbounds i32, i32* %y, i64 1
+  %12 = load i32, i32* %arrayidx34, align 4
+  %arrayidx35 = getelementptr inbounds i32, i32* %y, i64 2
+  %13 = load i32, i32* %arrayidx35, align 4
+  %arrayidx41 = getelementptr inbounds i32, i32* %y, i64 %idxprom5
+  %14 = load i32, i32* %arrayidx41, align 4
+  %arrayidx44 = getelementptr inbounds i32, i32* %y, i64 %idxprom8
+  %15 = load i32, i32* %arrayidx44, align 4
+  %arrayidx48 = getelementptr inbounds i32, i32* %y, i64 %idxprom11
+  %16 = load i32, i32* %arrayidx48, align 4
+  %arrayidx52 = getelementptr inbounds i32, i32* %y, i64 %idxprom15
+  %17 = load i32, i32* %arrayidx52, align 4
+  %arrayidx56 = getelementptr inbounds i32, i32* %y, i64 %idxprom19
+  %18 = load i32, i32* %arrayidx56, align 4
+  %arrayidx60 = getelementptr inbounds i32, i32* %y, i64 %idxprom23
+  %19 = load i32, i32* %arrayidx60, align 4
+  %arrayidx64 = getelementptr inbounds i32, i32* %y, i64 %idxprom27
+  %20 = load i32, i32* %arrayidx64, align 4
+  %arrayidx68 = getelementptr inbounds i32, i32* %y, i64 %idxprom31
+  %21 = load i32, i32* %arrayidx68, align 4
+  %mul69 = mul nsw i32 %11, %0
+  %arrayidx70 = getelementptr inbounds i32, i32* %z, i64 2
+  store i32 %mul69, i32* %arrayidx70, align 4
+  %mul71 = mul nsw i32 %12, %1
+  %arrayidx72 = getelementptr inbounds i32, i32* %z, i64 1
+  store i32 %mul71, i32* %arrayidx72, align 4
+  %mul73 = mul nsw i32 %13, %2
+  store i32 %mul73, i32* %z, align 4
+  %arrayidx76 = getelementptr inbounds i32, i32* %z, i64 6
+  %mul77 = mul nsw i32 %14, %3
+  %arrayidx78 = getelementptr inbounds i32, i32* %z, i64 4
+  store i32 %mul77, i32* %arrayidx78, align 4
+  %mul79 = mul nsw i32 %15, %4
+  %arrayidx80 = getelementptr inbounds i32, i32* %z, i64 3
+  store i32 %mul79, i32* %arrayidx80, align 4
+  %mul81 = mul nsw i32 %16, %5
+  %arrayidx82 = getelementptr inbounds i32, i32* %z, i64 8
+  store i32 %mul81, i32* %arrayidx82, align 4
+  %mul83 = mul nsw i32 %17, %6
+  %arrayidx84 = getelementptr inbounds i32, i32* %z, i64 7
+  store i32 %mul83, i32* %arrayidx84, align 4
+  %mul85 = mul nsw i32 %18, %7
+  store i32 %mul85, i32* %arrayidx76, align 4
+  %mul87 = mul nsw i32 %19, %8
+  %arrayidx88 = getelementptr inbounds i32, i32* %z, i64 11
+  store i32 %mul87, i32* %arrayidx88, align 4
+  %mul89 = mul nsw i32 %20, %9
+  %arrayidx90 = getelementptr inbounds i32, i32* %z, i64 10
+  store i32 %mul89, i32* %arrayidx90, align 4
+  %mul91 = mul nsw i32 %21, %10
+  %arrayidx92 = getelementptr inbounds i32, i32* %z, i64 9
+  store i32 %mul91, i32* %arrayidx92, align 4
+  ret void
+}
+
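+; store_blockstrided4: the same shuffled loads as reduce_blockstrided4, but
+; the eight products are stored contiguously through dst0; the checks show
+; a single <8 x i16> multiply and store.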
+define void @store_blockstrided4(i16* nocapture noundef readonly %x, i16* nocapture noundef readonly %y, i32 noundef %stride, i16 *%dst0) {
+; CHECK-LABEL: @store_blockstrided4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[IDXPROM:%.*]] = sext i32 [[STRIDE:%.*]] to i64
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i16, i16* [[X:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT:    [[ADD5:%.*]] = add nsw i32 [[STRIDE]], 1
+; CHECK-NEXT:    [[IDXPROM6:%.*]] = sext i32 [[ADD5]] to i64
+; CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM6]]
+; CHECK-NEXT:    [[ADD8:%.*]] = add nsw i32 [[STRIDE]], 2
+; CHECK-NEXT:    [[IDXPROM9:%.*]] = sext i32 [[ADD8]] to i64
+; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM9]]
+; CHECK-NEXT:    [[ADD11:%.*]] = add nsw i32 [[STRIDE]], 3
+; CHECK-NEXT:    [[IDXPROM12:%.*]] = sext i32 [[ADD11]] to i64
+; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i16, i16* [[X]], i64 [[IDXPROM12]]
+; CHECK-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i16, i16* [[Y:%.*]], i64 [[IDXPROM]]
+; CHECK-NEXT:    [[ARRAYIDX23:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM6]]
+; CHECK-NEXT:    [[ARRAYIDX26:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM9]]
+; CHECK-NEXT:    [[ARRAYIDX29:%.*]] = getelementptr inbounds i16, i16* [[Y]], i64 [[IDXPROM12]]
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i16* [[X]] to <4 x i16>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, <4 x i16>* [[TMP0]], align 2
+; CHECK-NEXT:    [[TMP2:%.*]] = load i16, i16* [[ARRAYIDX4]], align 2
+; CHECK-NEXT:    [[TMP3:%.*]] = load i16, i16* [[ARRAYIDX7]], align 2
+; CHECK-NEXT:    [[TMP4:%.*]] = load i16, i16* [[ARRAYIDX10]], align 2
+; CHECK-NEXT:    [[TMP5:%.*]] = load i16, i16* [[ARRAYIDX13]], align 2
+; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i16* [[Y]] to <4 x i16>*
+; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x i16>, <4 x i16>* [[TMP6]], align 2
+; CHECK-NEXT:    [[TMP8:%.*]] = load i16, i16* [[ARRAYIDX20]], align 2
+; CHECK-NEXT:    [[TMP9:%.*]] = load i16, i16* [[ARRAYIDX23]], align 2
+; CHECK-NEXT:    [[TMP10:%.*]] = load i16, i16* [[ARRAYIDX26]], align 2
+; CHECK-NEXT:    [[TMP11:%.*]] = load i16, i16* [[ARRAYIDX29]], align 2
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <4 x i16> [[TMP7]], <4 x i16> poison, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP13:%.*]] = insertelement <8 x i16> [[TMP12]], i16 [[TMP9]], i64 4
+; CHECK-NEXT:    [[TMP14:%.*]] = insertelement <8 x i16> [[TMP13]], i16 [[TMP8]], i64 5
+; CHECK-NEXT:    [[TMP15:%.*]] = insertelement <8 x i16> [[TMP14]], i16 [[TMP11]], i64 6
+; CHECK-NEXT:    [[TMP16:%.*]] = insertelement <8 x i16> [[TMP15]], i16 [[TMP10]], i64 7
+; CHECK-NEXT:    [[TMP17:%.*]] = shufflevector <4 x i16> [[TMP1]], <4 x i16> poison, <8 x i32> <i32 0, i32 1, i32 3, i32 2, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP18:%.*]] = insertelement <8 x i16> [[TMP17]], i16 [[TMP3]], i64 4
+; CHECK-NEXT:    [[TMP19:%.*]] = insertelement <8 x i16> [[TMP18]], i16 [[TMP2]], i64 5
+; CHECK-NEXT:    [[TMP20:%.*]] = insertelement <8 x i16> [[TMP19]], i16 [[TMP5]], i64 6
+; CHECK-NEXT:    [[TMP21:%.*]] = insertelement <8 x i16> [[TMP20]], i16 [[TMP4]], i64 7
+; CHECK-NEXT:    [[TMP22:%.*]] = mul <8 x i16> [[TMP16]], [[TMP21]]
+; CHECK-NEXT:    [[TMP23:%.*]] = bitcast i16* [[DST0:%.*]] to <8 x i16>*
+; CHECK-NEXT:    store <8 x i16> [[TMP22]], <8 x i16>* [[TMP23]], align 2
+; CHECK-NEXT:    ret void
+;
+entry:
+  %0 = load i16, i16* %x, align 2
+  %arrayidx1 = getelementptr inbounds i16, i16* %x, i64 1
+  %1 = load i16, i16* %arrayidx1, align 2
+  %arrayidx2 = getelementptr inbounds i16, i16* %x, i64 2
+  %2 = load i16, i16* %arrayidx2, align 2
+  %arrayidx3 = getelementptr inbounds i16, i16* %x, i64 3
+  %3 = load i16, i16* %arrayidx3, align 2
+  %idxprom = sext i32 %stride to i64
+  %arrayidx4 = getelementptr inbounds i16, i16* %x, i64 %idxprom
+  %4 = load i16, i16* %arrayidx4, align 2
+  %add5 = add nsw i32 %stride, 1
+  %idxprom6 = sext i32 %add5 to i64
+  %arrayidx7 = getelementptr inbounds i16, i16* %x, i64 %idxprom6
+  %5 = load i16, i16* %arrayidx7, align 2
+  %add8 = add nsw i32 %stride, 2
+  %idxprom9 = sext i32 %add8 to i64
+  %arrayidx10 = getelementptr inbounds i16, i16* %x, i64 %idxprom9
+  %6 = load i16, i16* %arrayidx10, align 2
+  %add11 = add nsw i32 %stride, 3
+  %idxprom12 = sext i32 %add11 to i64
+  %arrayidx13 = getelementptr inbounds i16, i16* %x, i64 %idxprom12
+  %7 = load i16, i16* %arrayidx13, align 2
+  %8 = load i16, i16* %y, align 2
+  %arrayidx15 = getelementptr inbounds i16, i16* %y, i64 1
+  %9 = load i16, i16* %arrayidx15, align 2
+  %arrayidx16 = getelementptr inbounds i16, i16* %y, i64 2
+  %10 = load i16, i16* %arrayidx16, align 2
+  %arrayidx17 = getelementptr inbounds i16, i16* %y, i64 3
+  %11 = load i16, i16* %arrayidx17, align 2
+  %arrayidx20 = getelementptr inbounds i16, i16* %y, i64 %idxprom
+  %12 = load i16, i16* %arrayidx20, align 2
+  %arrayidx23 = getelementptr inbounds i16, i16* %y, i64 %idxprom6
+  %13 = load i16, i16* %arrayidx23, align 2
+  %arrayidx26 = getelementptr inbounds i16, i16* %y, i64 %idxprom9
+  %14 = load i16, i16* %arrayidx26, align 2
+  %arrayidx29 = getelementptr inbounds i16, i16* %y, i64 %idxprom12
+  %15 = load i16, i16* %arrayidx29, align 2
+  %mul = mul i16 %8, %0
+  %mul36 = mul i16 %9, %1
+  %mul42 = mul i16 %11, %3
+  %mul48 = mul i16 %10, %2
+  %mul54 = mul i16 %13, %5
+  %mul60 = mul i16 %12, %4
+  %mul66 = mul i16 %15, %7
+  %mul72 = mul i16 %14, %6
+  %dst1 = getelementptr inbounds i16, i16* %dst0, i64 1
+  %dst2 = getelementptr inbounds i16, i16* %dst0, i64 2
+  %dst3 = getelementptr inbounds i16, i16* %dst0, i64 3
+  %dst4 = getelementptr inbounds i16, i16* %dst0, i64 4
+  %dst5 = getelementptr inbounds i16, i16* %dst0, i64 5
+  %dst6 = getelementptr inbounds i16, i16* %dst0, i64 6
+  %dst7 = getelementptr inbounds i16, i16* %dst0, i64 7
+  store i16 %mul, i16* %dst0
+  store i16 %mul36, i16* %dst1
+  store i16 %mul42, i16* %dst2
+  store i16 %mul48, i16* %dst3
+  store i16 %mul54, i16* %dst4
+  store i16 %mul60, i16* %dst5
+  store i16 %mul66, i16* %dst6
+  store i16 %mul72, i16* %dst7
+  ret void
+}
+
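+; The next test loads 4x4 blocks of i8 from %p1 and %p2 (rows %off1 and %off2
+; apart), zero-extends to i32, multiplies byte i by byte i+4 within each row,
+; and stores the sixteen products of the first two rows of each pointer to
+; %dst0. The later rows are loaded but their values are unused.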
+define void @store_blockstrided4x4(i8* nocapture noundef readonly %p1, i32 noundef %off1, i8* nocapture noundef readonly %p2, i32 noundef %off2, i32* %dst0) {
+; CHECK-LABEL: @store_blockstrided4x4(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[IDX_EXT:%.*]] = sext i32 [[OFF1:%.*]] to i64
+; CHECK-NEXT:    [[IDX_EXT63:%.*]] = sext i32 [[OFF2:%.*]] to i64
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, i8* [[P1:%.*]], i64 4
+; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i8, i8* [[P2:%.*]], i64 4
+; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[P1]], i64 [[IDX_EXT]]
+; CHECK-NEXT:    [[ADD_PTR64:%.*]] = getelementptr inbounds i8, i8* [[P2]], i64 [[IDX_EXT63]]
+; CHECK-NEXT:    [[ARRAYIDX3_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR]], i64 4
+; CHECK-NEXT:    [[ARRAYIDX5_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64]], i64 4
+; CHECK-NEXT:    [[DST4:%.*]] = getelementptr inbounds i32, i32* [[DST0:%.*]], i64 4
+; CHECK-NEXT:    [[DST8:%.*]] = getelementptr inbounds i32, i32* [[DST0]], i64 8
+; CHECK-NEXT:    [[DST12:%.*]] = getelementptr inbounds i32, i32* [[DST0]], i64 12
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i8* [[P1]] to <4 x i8>*
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, <4 x i8>* [[TMP0]], align 1
+; CHECK-NEXT:    [[TMP2:%.*]] = zext <4 x i8> [[TMP1]] to <4 x i32>
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[ARRAYIDX3]] to <4 x i8>*
+; CHECK-NEXT:    [[TMP4:%.*]] = load <4 x i8>, <4 x i8>* [[TMP3]], align 1
+; CHECK-NEXT:    [[TMP5:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32>
+; CHECK-NEXT:    [[TMP6:%.*]] = mul nuw nsw <4 x i32> [[TMP2]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast i32* [[DST0]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP8:%.*]] = bitcast i8* [[P2]] to <4 x i8>*
+; CHECK-NEXT:    [[TMP9:%.*]] = load <4 x i8>, <4 x i8>* [[TMP8]], align 1
+; CHECK-NEXT:    [[TMP10:%.*]] = zext <4 x i8> [[TMP9]] to <4 x i32>
+; CHECK-NEXT:    [[TMP11:%.*]] = bitcast i8* [[ARRAYIDX5]] to <4 x i8>*
+; CHECK-NEXT:    [[TMP12:%.*]] = load <4 x i8>, <4 x i8>* [[TMP11]], align 1
+; CHECK-NEXT:    [[TMP13:%.*]] = zext <4 x i8> [[TMP12]] to <4 x i32>
+; CHECK-NEXT:    [[TMP14:%.*]] = mul nuw nsw <4 x i32> [[TMP10]], [[TMP13]]
+; CHECK-NEXT:    [[TMP15:%.*]] = bitcast i32* [[DST4]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP16:%.*]] = bitcast i8* [[ADD_PTR]] to <4 x i8>*
+; CHECK-NEXT:    [[TMP17:%.*]] = load <4 x i8>, <4 x i8>* [[TMP16]], align 1
+; CHECK-NEXT:    [[TMP18:%.*]] = zext <4 x i8> [[TMP17]] to <4 x i32>
+; CHECK-NEXT:    [[TMP19:%.*]] = bitcast i8* [[ARRAYIDX3_1]] to <4 x i8>*
+; CHECK-NEXT:    [[TMP20:%.*]] = load <4 x i8>, <4 x i8>* [[TMP19]], align 1
+; CHECK-NEXT:    [[TMP21:%.*]] = zext <4 x i8> [[TMP20]] to <4 x i32>
+; CHECK-NEXT:    [[TMP22:%.*]] = mul nuw nsw <4 x i32> [[TMP18]], [[TMP21]]
+; CHECK-NEXT:    [[TMP23:%.*]] = bitcast i32* [[DST8]] to <4 x i32>*
+; CHECK-NEXT:    [[TMP24:%.*]] = bitcast i8* [[ADD_PTR64]] to <4 x i8>*
+; CHECK-NEXT:    [[TMP25:%.*]] = load <4 x i8>, <4 x i8>* [[TMP24]], align 1
+; CHECK-NEXT:    [[TMP26:%.*]] = zext <4 x i8> [[TMP25]] to <4 x i32>
+; CHECK-NEXT:    [[TMP27:%.*]] = bitcast i8* [[ARRAYIDX5_1]] to <4 x i8>*
+; CHECK-NEXT:    [[TMP28:%.*]] = load <4 x i8>, <4 x i8>* [[TMP27]], align 1
+; CHECK-NEXT:    [[TMP29:%.*]] = zext <4 x i8> [[TMP28]] to <4 x i32>
+; CHECK-NEXT:    [[TMP30:%.*]] = mul nuw nsw <4 x i32> [[TMP26]], [[TMP29]]
+; CHECK-NEXT:    store <4 x i32> [[TMP6]], <4 x i32>* [[TMP7]], align 4
+; CHECK-NEXT:    store <4 x i32> [[TMP14]], <4 x i32>* [[TMP15]], align 4
+; CHECK-NEXT:    store <4 x i32> [[TMP22]], <4 x i32>* [[TMP23]], align 4
+; CHECK-NEXT:    [[TMP31:%.*]] = bitcast i32* [[DST12]] to <4 x i32>*
+; CHECK-NEXT:    store <4 x i32> [[TMP30]], <4 x i32>* [[TMP31]], align 4
+; CHECK-NEXT:    ret void
+;
+entry:
+  %idx.ext = sext i32 %off1 to i64
+  %idx.ext63 = sext i32 %off2 to i64
+
+  %0 = load i8, i8* %p1, align 1
+  %conv = zext i8 %0 to i32
+  %1 = load i8, i8* %p2, align 1
+  %conv2 = zext i8 %1 to i32
+  %arrayidx3 = getelementptr inbounds i8, i8* %p1, i64 4
+  %2 = load i8, i8* %arrayidx3, align 1
+  %conv4 = zext i8 %2 to i32
+  %arrayidx5 = getelementptr inbounds i8, i8* %p2, i64 4
+  %3 = load i8, i8* %arrayidx5, align 1
+  %conv6 = zext i8 %3 to i32
+  %arrayidx8 = getelementptr inbounds i8, i8* %p1, i64 1
+  %4 = load i8, i8* %arrayidx8, align 1
+  %conv9 = zext i8 %4 to i32
+  %arrayidx10 = getelementptr inbounds i8, i8* %p2, i64 1
+  %5 = load i8, i8* %arrayidx10, align 1
+  %conv11 = zext i8 %5 to i32
+  %arrayidx13 = getelementptr inbounds i8, i8* %p1, i64 5
+  %6 = load i8, i8* %arrayidx13, align 1
+  %conv14 = zext i8 %6 to i32
+  %arrayidx15 = getelementptr inbounds i8, i8* %p2, i64 5
+  %7 = load i8, i8* %arrayidx15, align 1
+  %conv16 = zext i8 %7 to i32
+  %arrayidx20 = getelementptr inbounds i8, i8* %p1, i64 2
+  %8 = load i8, i8* %arrayidx20, align 1
+  %conv21 = zext i8 %8 to i32
+  %arrayidx22 = getelementptr inbounds i8, i8* %p2, i64 2
+  %9 = load i8, i8* %arrayidx22, align 1
+  %conv23 = zext i8 %9 to i32
+  %arrayidx25 = getelementptr inbounds i8, i8* %p1, i64 6
+  %10 = load i8, i8* %arrayidx25, align 1
+  %conv26 = zext i8 %10 to i32
+  %arrayidx27 = getelementptr inbounds i8, i8* %p2, i64 6
+  %11 = load i8, i8* %arrayidx27, align 1
+  %conv28 = zext i8 %11 to i32
+  %arrayidx32 = getelementptr inbounds i8, i8* %p1, i64 3
+  %12 = load i8, i8* %arrayidx32, align 1
+  %conv33 = zext i8 %12 to i32
+  %arrayidx34 = getelementptr inbounds i8, i8* %p2, i64 3
+  %13 = load i8, i8* %arrayidx34, align 1
+  %conv35 = zext i8 %13 to i32
+  %arrayidx37 = getelementptr inbounds i8, i8* %p1, i64 7
+  %14 = load i8, i8* %arrayidx37, align 1
+  %conv38 = zext i8 %14 to i32
+  %arrayidx39 = getelementptr inbounds i8, i8* %p2, i64 7
+  %15 = load i8, i8* %arrayidx39, align 1
+  %conv40 = zext i8 %15 to i32
+  %add.ptr = getelementptr inbounds i8, i8* %p1, i64 %idx.ext
+  %16 = load i8, i8* %add.ptr, align 1
+  %conv.1 = zext i8 %16 to i32
+  %add.ptr64 = getelementptr inbounds i8, i8* %p2, i64 %idx.ext63
+  %17 = load i8, i8* %add.ptr64, align 1
+  %conv2.1 = zext i8 %17 to i32
+  %arrayidx3.1 = getelementptr inbounds i8, i8* %add.ptr, i64 4
+  %18 = load i8, i8* %arrayidx3.1, align 1
+  %conv4.1 = zext i8 %18 to i32
+  %arrayidx5.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 4
+  %19 = load i8, i8* %arrayidx5.1, align 1
+  %conv6.1 = zext i8 %19 to i32
+  %arrayidx8.1 = getelementptr inbounds i8, i8* %add.ptr, i64 1
+  %20 = load i8, i8* %arrayidx8.1, align 1
+  %conv9.1 = zext i8 %20 to i32
+  %arrayidx10.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 1
+  %21 = load i8, i8* %arrayidx10.1, align 1
+  %conv11.1 = zext i8 %21 to i32
+  %arrayidx13.1 = getelementptr inbounds i8, i8* %add.ptr, i64 5
+  %22 = load i8, i8* %arrayidx13.1, align 1
+  %conv14.1 = zext i8 %22 to i32
+  %arrayidx15.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 5
+  %23 = load i8, i8* %arrayidx15.1, align 1
+  %conv16.1 = zext i8 %23 to i32
+  %arrayidx20.1 = getelementptr inbounds i8, i8* %add.ptr, i64 2
+  %24 = load i8, i8* %arrayidx20.1, align 1
+  %conv21.1 = zext i8 %24 to i32
+  %arrayidx22.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 2
+  %25 = load i8, i8* %arrayidx22.1, align 1
+  %conv23.1 = zext i8 %25 to i32
+  %arrayidx25.1 = getelementptr inbounds i8, i8* %add.ptr, i64 6
+  %26 = load i8, i8* %arrayidx25.1, align 1
+  %conv26.1 = zext i8 %26 to i32
+  %arrayidx27.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 6
+  %27 = load i8, i8* %arrayidx27.1, align 1
+  %conv28.1 = zext i8 %27 to i32
+  %arrayidx32.1 = getelementptr inbounds i8, i8* %add.ptr, i64 3
+  %28 = load i8, i8* %arrayidx32.1, align 1
+  %conv33.1 = zext i8 %28 to i32
+  %arrayidx34.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 3
+  %29 = load i8, i8* %arrayidx34.1, align 1
+  %conv35.1 = zext i8 %29 to i32
+  %arrayidx37.1 = getelementptr inbounds i8, i8* %add.ptr, i64 7
+  %30 = load i8, i8* %arrayidx37.1, align 1
+  %conv38.1 = zext i8 %30 to i32
+  %arrayidx39.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 7
+  %31 = load i8, i8* %arrayidx39.1, align 1
+  %conv40.1 = zext i8 %31 to i32
+  %add.ptr.1 = getelementptr inbounds i8, i8* %add.ptr, i64 %idx.ext
+  %32 = load i8, i8* %add.ptr.1, align 1
+  %conv.2 = zext i8 %32 to i32
+  %add.ptr64.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 %idx.ext63
+  %33 = load i8, i8* %add.ptr64.1, align 1
+  %conv2.2 = zext i8 %33 to i32
+  %arrayidx3.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 4
+  %34 = load i8, i8* %arrayidx3.2, align 1
+  %conv4.2 = zext i8 %34 to i32
+  %arrayidx5.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 4
+  %35 = load i8, i8* %arrayidx5.2, align 1
+  %conv6.2 = zext i8 %35 to i32
+  %arrayidx8.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 1
+  %36 = load i8, i8* %arrayidx8.2, align 1
+  %conv9.2 = zext i8 %36 to i32
+  %arrayidx10.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 1
+  %37 = load i8, i8* %arrayidx10.2, align 1
+  %conv11.2 = zext i8 %37 to i32
+  %arrayidx13.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 5
+  %38 = load i8, i8* %arrayidx13.2, align 1
+  %conv14.2 = zext i8 %38 to i32
+  %arrayidx15.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 5
+  %39 = load i8, i8* %arrayidx15.2, align 1
+  %conv16.2 = zext i8 %39 to i32
+  %arrayidx20.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 2
+  %40 = load i8, i8* %arrayidx20.2, align 1
+  %conv21.2 = zext i8 %40 to i32
+  %arrayidx22.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 2
+  %41 = load i8, i8* %arrayidx22.2, align 1
+  %conv23.2 = zext i8 %41 to i32
+  %arrayidx25.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 6
+  %42 = load i8, i8* %arrayidx25.2, align 1
+  %conv26.2 = zext i8 %42 to i32
+  %arrayidx27.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 6
+  %43 = load i8, i8* %arrayidx27.2, align 1
+  %conv28.2 = zext i8 %43 to i32
+  %arrayidx32.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 3
+  %44 = load i8, i8* %arrayidx32.2, align 1
+  %conv33.2 = zext i8 %44 to i32
+  %arrayidx34.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 3
+  %45 = load i8, i8* %arrayidx34.2, align 1
+  %conv35.2 = zext i8 %45 to i32
+  %arrayidx37.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 7
+  %46 = load i8, i8* %arrayidx37.2, align 1
+  %conv38.2 = zext i8 %46 to i32
+  %arrayidx39.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 7
+  %47 = load i8, i8* %arrayidx39.2, align 1
+  %conv40.2 = zext i8 %47 to i32
+  %add.ptr.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 %idx.ext
+  %48 = load i8, i8* %add.ptr.2, align 1
+  %conv.3 = zext i8 %48 to i32
+  %add.ptr64.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 %idx.ext63
+  %49 = load i8, i8* %add.ptr64.2, align 1
+  %conv2.3 = zext i8 %49 to i32
+  %arrayidx3.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 4
+  %50 = load i8, i8* %arrayidx3.3, align 1
+  %conv4.3 = zext i8 %50 to i32
+  %arrayidx5.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 4
+  %51 = load i8, i8* %arrayidx5.3, align 1
+  %conv6.3 = zext i8 %51 to i32
+  %arrayidx8.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 1
+  %52 = load i8, i8* %arrayidx8.3, align 1
+  %conv9.3 = zext i8 %52 to i32
+  %arrayidx10.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 1
+  %53 = load i8, i8* %arrayidx10.3, align 1
+  %conv11.3 = zext i8 %53 to i32
+  %arrayidx13.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 5
+  %54 = load i8, i8* %arrayidx13.3, align 1
+  %conv14.3 = zext i8 %54 to i32
+  %arrayidx15.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 5
+  %55 = load i8, i8* %arrayidx15.3, align 1
+  %conv16.3 = zext i8 %55 to i32
+  %arrayidx20.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 2
+  %56 = load i8, i8* %arrayidx20.3, align 1
+  %conv21.3 = zext i8 %56 to i32
+  %arrayidx22.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 2
+  %57 = load i8, i8* %arrayidx22.3, align 1
+  %conv23.3 = zext i8 %57 to i32
+  %arrayidx25.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 6
+  %58 = load i8, i8* %arrayidx25.3, align 1
+  %conv26.3 = zext i8 %58 to i32
+  %arrayidx27.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 6
+  %59 = load i8, i8* %arrayidx27.3, align 1
+  %conv28.3 = zext i8 %59 to i32
+  %arrayidx32.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 3
+  %60 = load i8, i8* %arrayidx32.3, align 1
+  %conv33.3 = zext i8 %60 to i32
+  %arrayidx34.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 3
+  %61 = load i8, i8* %arrayidx34.3, align 1
+  %conv35.3 = zext i8 %61 to i32
+  %arrayidx37.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 7
+  %62 = load i8, i8* %arrayidx37.3, align 1
+  %conv38.3 = zext i8 %62 to i32
+  %arrayidx39.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 7
+  %63 = load i8, i8* %arrayidx39.3, align 1
+  %conv40.3 = zext i8 %63 to i32
+
+  %m1 = mul i32 %conv, %conv4
+  %m2 = mul i32 %conv9, %conv14
+  %m3 = mul i32 %conv21, %conv26
+  %m4 = mul i32 %conv33, %conv38
+  %m5 = mul i32 %conv2, %conv6
+  %m6 = mul i32 %conv11, %conv16
+  %m7 = mul i32 %conv23, %conv28
+  %m8 = mul i32 %conv35, %conv40
+  %m9 = mul i32 %conv.1, %conv4.1
+  %m10 = mul i32 %conv9.1, %conv14.1
+  %m11 = mul i32 %conv21.1, %conv26.1
+  %m12 = mul i32 %conv33.1, %conv38.1
+  %m13 = mul i32 %conv2.1, %conv6.1
+  %m14 = mul i32 %conv11.1, %conv16.1
+  %m15 = mul i32 %conv23.1, %conv28.1
+  %m16 = mul i32 %conv35.1, %conv40.1
+
+  %dst1 = getelementptr inbounds i32, i32* %dst0, i64 1
+  %dst2 = getelementptr inbounds i32, i32* %dst0, i64 2
+  %dst3 = getelementptr inbounds i32, i32* %dst0, i64 3
+  %dst4 = getelementptr inbounds i32, i32* %dst0, i64 4
+  %dst5 = getelementptr inbounds i32, i32* %dst0, i64 5
+  %dst6 = getelementptr inbounds i32, i32* %dst0, i64 6
+  %dst7 = getelementptr inbounds i32, i32* %dst0, i64 7
+  %dst8 = getelementptr inbounds i32, i32* %dst0, i64 8
+  %dst9 = getelementptr inbounds i32, i32* %dst0, i64 9
+  %dst10 = getelementptr inbounds i32, i32* %dst0, i64 10
+  %dst11 = getelementptr inbounds i32, i32* %dst0, i64 11
+  %dst12 = getelementptr inbounds i32, i32* %dst0, i64 12
+  %dst13 = getelementptr inbounds i32, i32* %dst0, i64 13
+  %dst14 = getelementptr inbounds i32, i32* %dst0, i64 14
+  %dst15 = getelementptr inbounds i32, i32* %dst0, i64 15
+  store i32 %m1, i32* %dst0
+  store i32 %m2, i32* %dst1
+  store i32 %m3, i32* %dst2
+  store i32 %m4, i32* %dst3
+  store i32 %m5, i32* %dst4
+  store i32 %m6, i32* %dst5
+  store i32 %m7, i32* %dst6
+  store i32 %m8, i32* %dst7
+  store i32 %m9, i32* %dst8
+  store i32 %m10, i32* %dst9
+  store i32 %m11, i32* %dst10
+  store i32 %m12, i32* %dst11
+  store i32 %m13, i32* %dst12
+  store i32 %m14, i32* %dst13
+  store i32 %m15, i32* %dst14
+  store i32 %m16, i32* %dst15
+  ret void
+}
+
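+; A SATD-style kernel: each of four rows packs two 16-bit byte differences
+; into one i32 (via shl 16), runs Hadamard-like add/sub butterflies within and
+; across rows, takes per-lane absolute values with the lshr/and/mul/xor trick,
+; accumulates, and finally folds the two 16-bit halves and halves the result.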
+define dso_local i32 @full(i8* nocapture noundef readonly %p1, i32 noundef %st1, i8* nocapture noundef readonly %p2, i32 noundef %st2) {
+; CHECK-LABEL: @full(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[IDX_EXT:%.*]] = sext i32 [[ST1:%.*]] to i64
+; CHECK-NEXT:    [[IDX_EXT63:%.*]] = sext i32 [[ST2:%.*]] to i64
+; CHECK-NEXT:    [[TMP0:%.*]] = load i8, i8* [[P1:%.*]], align 1
+; CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[TMP0]] to i32
+; CHECK-NEXT:    [[TMP1:%.*]] = load i8, i8* [[P2:%.*]], align 1
+; CHECK-NEXT:    [[CONV2:%.*]] = zext i8 [[TMP1]] to i32
+; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[CONV]], [[CONV2]]
+; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i8, i8* [[P1]], i64 4
+; CHECK-NEXT:    [[TMP2:%.*]] = load i8, i8* [[ARRAYIDX3]], align 1
+; CHECK-NEXT:    [[CONV4:%.*]] = zext i8 [[TMP2]] to i32
+; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i8, i8* [[P2]], i64 4
+; CHECK-NEXT:    [[TMP3:%.*]] = load i8, i8* [[ARRAYIDX5]], align 1
+; CHECK-NEXT:    [[CONV6:%.*]] = zext i8 [[TMP3]] to i32
+; CHECK-NEXT:    [[SUB7:%.*]] = sub nsw i32 [[CONV4]], [[CONV6]]
+; CHECK-NEXT:    [[SHL:%.*]] = shl nsw i32 [[SUB7]], 16
+; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[SHL]], [[SUB]]
+; CHECK-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, i8* [[P1]], i64 1
+; CHECK-NEXT:    [[TMP4:%.*]] = load i8, i8* [[ARRAYIDX8]], align 1
+; CHECK-NEXT:    [[CONV9:%.*]] = zext i8 [[TMP4]] to i32
+; CHECK-NEXT:    [[ARRAYIDX10:%.*]] = getelementptr inbounds i8, i8* [[P2]], i64 1
+; CHECK-NEXT:    [[TMP5:%.*]] = load i8, i8* [[ARRAYIDX10]], align 1
+; CHECK-NEXT:    [[CONV11:%.*]] = zext i8 [[TMP5]] to i32
+; CHECK-NEXT:    [[SUB12:%.*]] = sub nsw i32 [[CONV9]], [[CONV11]]
+; CHECK-NEXT:    [[ARRAYIDX13:%.*]] = getelementptr inbounds i8, i8* [[P1]], i64 5
+; CHECK-NEXT:    [[TMP6:%.*]] = load i8, i8* [[ARRAYIDX13]], align 1
+; CHECK-NEXT:    [[CONV14:%.*]] = zext i8 [[TMP6]] to i32
+; CHECK-NEXT:    [[ARRAYIDX15:%.*]] = getelementptr inbounds i8, i8* [[P2]], i64 5
+; CHECK-NEXT:    [[TMP7:%.*]] = load i8, i8* [[ARRAYIDX15]], align 1
+; CHECK-NEXT:    [[CONV16:%.*]] = zext i8 [[TMP7]] to i32
+; CHECK-NEXT:    [[SUB17:%.*]] = sub nsw i32 [[CONV14]], [[CONV16]]
+; CHECK-NEXT:    [[SHL18:%.*]] = shl nsw i32 [[SUB17]], 16
+; CHECK-NEXT:    [[ADD19:%.*]] = add nsw i32 [[SHL18]], [[SUB12]]
+; CHECK-NEXT:    [[ARRAYIDX20:%.*]] = getelementptr inbounds i8, i8* [[P1]], i64 2
+; CHECK-NEXT:    [[TMP8:%.*]] = load i8, i8* [[ARRAYIDX20]], align 1
+; CHECK-NEXT:    [[CONV21:%.*]] = zext i8 [[TMP8]] to i32
+; CHECK-NEXT:    [[ARRAYIDX22:%.*]] = getelementptr inbounds i8, i8* [[P2]], i64 2
+; CHECK-NEXT:    [[TMP9:%.*]] = load i8, i8* [[ARRAYIDX22]], align 1
+; CHECK-NEXT:    [[CONV23:%.*]] = zext i8 [[TMP9]] to i32
+; CHECK-NEXT:    [[SUB24:%.*]] = sub nsw i32 [[CONV21]], [[CONV23]]
+; CHECK-NEXT:    [[ARRAYIDX25:%.*]] = getelementptr inbounds i8, i8* [[P1]], i64 6
+; CHECK-NEXT:    [[TMP10:%.*]] = load i8, i8* [[ARRAYIDX25]], align 1
+; CHECK-NEXT:    [[CONV26:%.*]] = zext i8 [[TMP10]] to i32
+; CHECK-NEXT:    [[ARRAYIDX27:%.*]] = getelementptr inbounds i8, i8* [[P2]], i64 6
+; CHECK-NEXT:    [[TMP11:%.*]] = load i8, i8* [[ARRAYIDX27]], align 1
+; CHECK-NEXT:    [[CONV28:%.*]] = zext i8 [[TMP11]] to i32
+; CHECK-NEXT:    [[SUB29:%.*]] = sub nsw i32 [[CONV26]], [[CONV28]]
+; CHECK-NEXT:    [[SHL30:%.*]] = shl nsw i32 [[SUB29]], 16
+; CHECK-NEXT:    [[ADD31:%.*]] = add nsw i32 [[SHL30]], [[SUB24]]
+; CHECK-NEXT:    [[ARRAYIDX32:%.*]] = getelementptr inbounds i8, i8* [[P1]], i64 3
+; CHECK-NEXT:    [[TMP12:%.*]] = load i8, i8* [[ARRAYIDX32]], align 1
+; CHECK-NEXT:    [[CONV33:%.*]] = zext i8 [[TMP12]] to i32
+; CHECK-NEXT:    [[ARRAYIDX34:%.*]] = getelementptr inbounds i8, i8* [[P2]], i64 3
+; CHECK-NEXT:    [[TMP13:%.*]] = load i8, i8* [[ARRAYIDX34]], align 1
+; CHECK-NEXT:    [[CONV35:%.*]] = zext i8 [[TMP13]] to i32
+; CHECK-NEXT:    [[SUB36:%.*]] = sub nsw i32 [[CONV33]], [[CONV35]]
+; CHECK-NEXT:    [[ARRAYIDX37:%.*]] = getelementptr inbounds i8, i8* [[P1]], i64 7
+; CHECK-NEXT:    [[TMP14:%.*]] = load i8, i8* [[ARRAYIDX37]], align 1
+; CHECK-NEXT:    [[CONV38:%.*]] = zext i8 [[TMP14]] to i32
+; CHECK-NEXT:    [[ARRAYIDX39:%.*]] = getelementptr inbounds i8, i8* [[P2]], i64 7
+; CHECK-NEXT:    [[TMP15:%.*]] = load i8, i8* [[ARRAYIDX39]], align 1
+; CHECK-NEXT:    [[CONV40:%.*]] = zext i8 [[TMP15]] to i32
+; CHECK-NEXT:    [[SUB41:%.*]] = sub nsw i32 [[CONV38]], [[CONV40]]
+; CHECK-NEXT:    [[SHL42:%.*]] = shl nsw i32 [[SUB41]], 16
+; CHECK-NEXT:    [[ADD43:%.*]] = add nsw i32 [[SHL42]], [[SUB36]]
+; CHECK-NEXT:    [[ADD44:%.*]] = add nsw i32 [[ADD19]], [[ADD]]
+; CHECK-NEXT:    [[SUB45:%.*]] = sub nsw i32 [[ADD]], [[ADD19]]
+; CHECK-NEXT:    [[ADD46:%.*]] = add nsw i32 [[ADD43]], [[ADD31]]
+; CHECK-NEXT:    [[SUB47:%.*]] = sub nsw i32 [[ADD31]], [[ADD43]]
+; CHECK-NEXT:    [[ADD48:%.*]] = add nsw i32 [[ADD46]], [[ADD44]]
+; CHECK-NEXT:    [[SUB51:%.*]] = sub nsw i32 [[ADD44]], [[ADD46]]
+; CHECK-NEXT:    [[ADD55:%.*]] = add nsw i32 [[SUB47]], [[SUB45]]
+; CHECK-NEXT:    [[SUB59:%.*]] = sub nsw i32 [[SUB45]], [[SUB47]]
+; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i8, i8* [[P1]], i64 [[IDX_EXT]]
+; CHECK-NEXT:    [[ADD_PTR64:%.*]] = getelementptr inbounds i8, i8* [[P2]], i64 [[IDX_EXT63]]
+; CHECK-NEXT:    [[TMP16:%.*]] = load i8, i8* [[ADD_PTR]], align 1
+; CHECK-NEXT:    [[CONV_1:%.*]] = zext i8 [[TMP16]] to i32
+; CHECK-NEXT:    [[TMP17:%.*]] = load i8, i8* [[ADD_PTR64]], align 1
+; CHECK-NEXT:    [[CONV2_1:%.*]] = zext i8 [[TMP17]] to i32
+; CHECK-NEXT:    [[SUB_1:%.*]] = sub nsw i32 [[CONV_1]], [[CONV2_1]]
+; CHECK-NEXT:    [[ARRAYIDX3_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR]], i64 4
+; CHECK-NEXT:    [[TMP18:%.*]] = load i8, i8* [[ARRAYIDX3_1]], align 1
+; CHECK-NEXT:    [[CONV4_1:%.*]] = zext i8 [[TMP18]] to i32
+; CHECK-NEXT:    [[ARRAYIDX5_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64]], i64 4
+; CHECK-NEXT:    [[TMP19:%.*]] = load i8, i8* [[ARRAYIDX5_1]], align 1
+; CHECK-NEXT:    [[CONV6_1:%.*]] = zext i8 [[TMP19]] to i32
+; CHECK-NEXT:    [[SUB7_1:%.*]] = sub nsw i32 [[CONV4_1]], [[CONV6_1]]
+; CHECK-NEXT:    [[SHL_1:%.*]] = shl nsw i32 [[SUB7_1]], 16
+; CHECK-NEXT:    [[ADD_1:%.*]] = add nsw i32 [[SHL_1]], [[SUB_1]]
+; CHECK-NEXT:    [[ARRAYIDX8_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR]], i64 1
+; CHECK-NEXT:    [[TMP20:%.*]] = load i8, i8* [[ARRAYIDX8_1]], align 1
+; CHECK-NEXT:    [[CONV9_1:%.*]] = zext i8 [[TMP20]] to i32
+; CHECK-NEXT:    [[ARRAYIDX10_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64]], i64 1
+; CHECK-NEXT:    [[TMP21:%.*]] = load i8, i8* [[ARRAYIDX10_1]], align 1
+; CHECK-NEXT:    [[CONV11_1:%.*]] = zext i8 [[TMP21]] to i32
+; CHECK-NEXT:    [[SUB12_1:%.*]] = sub nsw i32 [[CONV9_1]], [[CONV11_1]]
+; CHECK-NEXT:    [[ARRAYIDX13_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR]], i64 5
+; CHECK-NEXT:    [[TMP22:%.*]] = load i8, i8* [[ARRAYIDX13_1]], align 1
+; CHECK-NEXT:    [[CONV14_1:%.*]] = zext i8 [[TMP22]] to i32
+; CHECK-NEXT:    [[ARRAYIDX15_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64]], i64 5
+; CHECK-NEXT:    [[TMP23:%.*]] = load i8, i8* [[ARRAYIDX15_1]], align 1
+; CHECK-NEXT:    [[CONV16_1:%.*]] = zext i8 [[TMP23]] to i32
+; CHECK-NEXT:    [[SUB17_1:%.*]] = sub nsw i32 [[CONV14_1]], [[CONV16_1]]
+; CHECK-NEXT:    [[SHL18_1:%.*]] = shl nsw i32 [[SUB17_1]], 16
+; CHECK-NEXT:    [[ADD19_1:%.*]] = add nsw i32 [[SHL18_1]], [[SUB12_1]]
+; CHECK-NEXT:    [[ARRAYIDX20_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR]], i64 2
+; CHECK-NEXT:    [[TMP24:%.*]] = load i8, i8* [[ARRAYIDX20_1]], align 1
+; CHECK-NEXT:    [[CONV21_1:%.*]] = zext i8 [[TMP24]] to i32
+; CHECK-NEXT:    [[ARRAYIDX22_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64]], i64 2
+; CHECK-NEXT:    [[TMP25:%.*]] = load i8, i8* [[ARRAYIDX22_1]], align 1
+; CHECK-NEXT:    [[CONV23_1:%.*]] = zext i8 [[TMP25]] to i32
+; CHECK-NEXT:    [[SUB24_1:%.*]] = sub nsw i32 [[CONV21_1]], [[CONV23_1]]
+; CHECK-NEXT:    [[ARRAYIDX25_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR]], i64 6
+; CHECK-NEXT:    [[TMP26:%.*]] = load i8, i8* [[ARRAYIDX25_1]], align 1
+; CHECK-NEXT:    [[CONV26_1:%.*]] = zext i8 [[TMP26]] to i32
+; CHECK-NEXT:    [[ARRAYIDX27_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64]], i64 6
+; CHECK-NEXT:    [[TMP27:%.*]] = load i8, i8* [[ARRAYIDX27_1]], align 1
+; CHECK-NEXT:    [[CONV28_1:%.*]] = zext i8 [[TMP27]] to i32
+; CHECK-NEXT:    [[SUB29_1:%.*]] = sub nsw i32 [[CONV26_1]], [[CONV28_1]]
+; CHECK-NEXT:    [[SHL30_1:%.*]] = shl nsw i32 [[SUB29_1]], 16
+; CHECK-NEXT:    [[ADD31_1:%.*]] = add nsw i32 [[SHL30_1]], [[SUB24_1]]
+; CHECK-NEXT:    [[ARRAYIDX32_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR]], i64 3
+; CHECK-NEXT:    [[TMP28:%.*]] = load i8, i8* [[ARRAYIDX32_1]], align 1
+; CHECK-NEXT:    [[CONV33_1:%.*]] = zext i8 [[TMP28]] to i32
+; CHECK-NEXT:    [[ARRAYIDX34_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64]], i64 3
+; CHECK-NEXT:    [[TMP29:%.*]] = load i8, i8* [[ARRAYIDX34_1]], align 1
+; CHECK-NEXT:    [[CONV35_1:%.*]] = zext i8 [[TMP29]] to i32
+; CHECK-NEXT:    [[SUB36_1:%.*]] = sub nsw i32 [[CONV33_1]], [[CONV35_1]]
+; CHECK-NEXT:    [[ARRAYIDX37_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR]], i64 7
+; CHECK-NEXT:    [[TMP30:%.*]] = load i8, i8* [[ARRAYIDX37_1]], align 1
+; CHECK-NEXT:    [[CONV38_1:%.*]] = zext i8 [[TMP30]] to i32
+; CHECK-NEXT:    [[ARRAYIDX39_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64]], i64 7
+; CHECK-NEXT:    [[TMP31:%.*]] = load i8, i8* [[ARRAYIDX39_1]], align 1
+; CHECK-NEXT:    [[CONV40_1:%.*]] = zext i8 [[TMP31]] to i32
+; CHECK-NEXT:    [[SUB41_1:%.*]] = sub nsw i32 [[CONV38_1]], [[CONV40_1]]
+; CHECK-NEXT:    [[SHL42_1:%.*]] = shl nsw i32 [[SUB41_1]], 16
+; CHECK-NEXT:    [[ADD43_1:%.*]] = add nsw i32 [[SHL42_1]], [[SUB36_1]]
+; CHECK-NEXT:    [[ADD44_1:%.*]] = add nsw i32 [[ADD19_1]], [[ADD_1]]
+; CHECK-NEXT:    [[SUB45_1:%.*]] = sub nsw i32 [[ADD_1]], [[ADD19_1]]
+; CHECK-NEXT:    [[ADD46_1:%.*]] = add nsw i32 [[ADD43_1]], [[ADD31_1]]
+; CHECK-NEXT:    [[SUB47_1:%.*]] = sub nsw i32 [[ADD31_1]], [[ADD43_1]]
+; CHECK-NEXT:    [[ADD48_1:%.*]] = add nsw i32 [[ADD46_1]], [[ADD44_1]]
+; CHECK-NEXT:    [[SUB51_1:%.*]] = sub nsw i32 [[ADD44_1]], [[ADD46_1]]
+; CHECK-NEXT:    [[ADD55_1:%.*]] = add nsw i32 [[SUB47_1]], [[SUB45_1]]
+; CHECK-NEXT:    [[SUB59_1:%.*]] = sub nsw i32 [[SUB45_1]], [[SUB47_1]]
+; CHECK-NEXT:    [[ADD_PTR_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR]], i64 [[IDX_EXT]]
+; CHECK-NEXT:    [[ADD_PTR64_1:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64]], i64 [[IDX_EXT63]]
+; CHECK-NEXT:    [[TMP32:%.*]] = load i8, i8* [[ADD_PTR_1]], align 1
+; CHECK-NEXT:    [[CONV_2:%.*]] = zext i8 [[TMP32]] to i32
+; CHECK-NEXT:    [[TMP33:%.*]] = load i8, i8* [[ADD_PTR64_1]], align 1
+; CHECK-NEXT:    [[CONV2_2:%.*]] = zext i8 [[TMP33]] to i32
+; CHECK-NEXT:    [[SUB_2:%.*]] = sub nsw i32 [[CONV_2]], [[CONV2_2]]
+; CHECK-NEXT:    [[ARRAYIDX3_2:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR_1]], i64 4
+; CHECK-NEXT:    [[TMP34:%.*]] = load i8, i8* [[ARRAYIDX3_2]], align 1
+; CHECK-NEXT:    [[CONV4_2:%.*]] = zext i8 [[TMP34]] to i32
+; CHECK-NEXT:    [[ARRAYIDX5_2:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64_1]], i64 4
+; CHECK-NEXT:    [[TMP35:%.*]] = load i8, i8* [[ARRAYIDX5_2]], align 1
+; CHECK-NEXT:    [[CONV6_2:%.*]] = zext i8 [[TMP35]] to i32
+; CHECK-NEXT:    [[SUB7_2:%.*]] = sub nsw i32 [[CONV4_2]], [[CONV6_2]]
+; CHECK-NEXT:    [[SHL_2:%.*]] = shl nsw i32 [[SUB7_2]], 16
+; CHECK-NEXT:    [[ADD_2:%.*]] = add nsw i32 [[SHL_2]], [[SUB_2]]
+; CHECK-NEXT:    [[ARRAYIDX8_2:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR_1]], i64 1
+; CHECK-NEXT:    [[TMP36:%.*]] = load i8, i8* [[ARRAYIDX8_2]], align 1
+; CHECK-NEXT:    [[CONV9_2:%.*]] = zext i8 [[TMP36]] to i32
+; CHECK-NEXT:    [[ARRAYIDX10_2:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64_1]], i64 1
+; CHECK-NEXT:    [[TMP37:%.*]] = load i8, i8* [[ARRAYIDX10_2]], align 1
+; CHECK-NEXT:    [[CONV11_2:%.*]] = zext i8 [[TMP37]] to i32
+; CHECK-NEXT:    [[SUB12_2:%.*]] = sub nsw i32 [[CONV9_2]], [[CONV11_2]]
+; CHECK-NEXT:    [[ARRAYIDX13_2:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR_1]], i64 5
+; CHECK-NEXT:    [[TMP38:%.*]] = load i8, i8* [[ARRAYIDX13_2]], align 1
+; CHECK-NEXT:    [[CONV14_2:%.*]] = zext i8 [[TMP38]] to i32
+; CHECK-NEXT:    [[ARRAYIDX15_2:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64_1]], i64 5
+; CHECK-NEXT:    [[TMP39:%.*]] = load i8, i8* [[ARRAYIDX15_2]], align 1
+; CHECK-NEXT:    [[CONV16_2:%.*]] = zext i8 [[TMP39]] to i32
+; CHECK-NEXT:    [[SUB17_2:%.*]] = sub nsw i32 [[CONV14_2]], [[CONV16_2]]
+; CHECK-NEXT:    [[SHL18_2:%.*]] = shl nsw i32 [[SUB17_2]], 16
+; CHECK-NEXT:    [[ADD19_2:%.*]] = add nsw i32 [[SHL18_2]], [[SUB12_2]]
+; CHECK-NEXT:    [[ARRAYIDX20_2:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR_1]], i64 2
+; CHECK-NEXT:    [[TMP40:%.*]] = load i8, i8* [[ARRAYIDX20_2]], align 1
+; CHECK-NEXT:    [[CONV21_2:%.*]] = zext i8 [[TMP40]] to i32
+; CHECK-NEXT:    [[ARRAYIDX22_2:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64_1]], i64 2
+; CHECK-NEXT:    [[TMP41:%.*]] = load i8, i8* [[ARRAYIDX22_2]], align 1
+; CHECK-NEXT:    [[CONV23_2:%.*]] = zext i8 [[TMP41]] to i32
+; CHECK-NEXT:    [[SUB24_2:%.*]] = sub nsw i32 [[CONV21_2]], [[CONV23_2]]
+; CHECK-NEXT:    [[ARRAYIDX25_2:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR_1]], i64 6
+; CHECK-NEXT:    [[TMP42:%.*]] = load i8, i8* [[ARRAYIDX25_2]], align 1
+; CHECK-NEXT:    [[CONV26_2:%.*]] = zext i8 [[TMP42]] to i32
+; CHECK-NEXT:    [[ARRAYIDX27_2:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64_1]], i64 6
+; CHECK-NEXT:    [[TMP43:%.*]] = load i8, i8* [[ARRAYIDX27_2]], align 1
+; CHECK-NEXT:    [[CONV28_2:%.*]] = zext i8 [[TMP43]] to i32
+; CHECK-NEXT:    [[SUB29_2:%.*]] = sub nsw i32 [[CONV26_2]], [[CONV28_2]]
+; CHECK-NEXT:    [[SHL30_2:%.*]] = shl nsw i32 [[SUB29_2]], 16
+; CHECK-NEXT:    [[ADD31_2:%.*]] = add nsw i32 [[SHL30_2]], [[SUB24_2]]
+; CHECK-NEXT:    [[ARRAYIDX32_2:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR_1]], i64 3
+; CHECK-NEXT:    [[TMP44:%.*]] = load i8, i8* [[ARRAYIDX32_2]], align 1
+; CHECK-NEXT:    [[CONV33_2:%.*]] = zext i8 [[TMP44]] to i32
+; CHECK-NEXT:    [[ARRAYIDX34_2:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64_1]], i64 3
+; CHECK-NEXT:    [[TMP45:%.*]] = load i8, i8* [[ARRAYIDX34_2]], align 1
+; CHECK-NEXT:    [[CONV35_2:%.*]] = zext i8 [[TMP45]] to i32
+; CHECK-NEXT:    [[SUB36_2:%.*]] = sub nsw i32 [[CONV33_2]], [[CONV35_2]]
+; CHECK-NEXT:    [[ARRAYIDX37_2:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR_1]], i64 7
+; CHECK-NEXT:    [[TMP46:%.*]] = load i8, i8* [[ARRAYIDX37_2]], align 1
+; CHECK-NEXT:    [[CONV38_2:%.*]] = zext i8 [[TMP46]] to i32
+; CHECK-NEXT:    [[ARRAYIDX39_2:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64_1]], i64 7
+; CHECK-NEXT:    [[TMP47:%.*]] = load i8, i8* [[ARRAYIDX39_2]], align 1
+; CHECK-NEXT:    [[CONV40_2:%.*]] = zext i8 [[TMP47]] to i32
+; CHECK-NEXT:    [[SUB41_2:%.*]] = sub nsw i32 [[CONV38_2]], [[CONV40_2]]
+; CHECK-NEXT:    [[SHL42_2:%.*]] = shl nsw i32 [[SUB41_2]], 16
+; CHECK-NEXT:    [[ADD43_2:%.*]] = add nsw i32 [[SHL42_2]], [[SUB36_2]]
+; CHECK-NEXT:    [[ADD44_2:%.*]] = add nsw i32 [[ADD19_2]], [[ADD_2]]
+; CHECK-NEXT:    [[SUB45_2:%.*]] = sub nsw i32 [[ADD_2]], [[ADD19_2]]
+; CHECK-NEXT:    [[ADD46_2:%.*]] = add nsw i32 [[ADD43_2]], [[ADD31_2]]
+; CHECK-NEXT:    [[SUB47_2:%.*]] = sub nsw i32 [[ADD31_2]], [[ADD43_2]]
+; CHECK-NEXT:    [[ADD48_2:%.*]] = add nsw i32 [[ADD46_2]], [[ADD44_2]]
+; CHECK-NEXT:    [[SUB51_2:%.*]] = sub nsw i32 [[ADD44_2]], [[ADD46_2]]
+; CHECK-NEXT:    [[ADD55_2:%.*]] = add nsw i32 [[SUB47_2]], [[SUB45_2]]
+; CHECK-NEXT:    [[SUB59_2:%.*]] = sub nsw i32 [[SUB45_2]], [[SUB47_2]]
+; CHECK-NEXT:    [[ADD_PTR_2:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR_1]], i64 [[IDX_EXT]]
+; CHECK-NEXT:    [[ADD_PTR64_2:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64_1]], i64 [[IDX_EXT63]]
+; CHECK-NEXT:    [[TMP48:%.*]] = load i8, i8* [[ADD_PTR_2]], align 1
+; CHECK-NEXT:    [[CONV_3:%.*]] = zext i8 [[TMP48]] to i32
+; CHECK-NEXT:    [[TMP49:%.*]] = load i8, i8* [[ADD_PTR64_2]], align 1
+; CHECK-NEXT:    [[CONV2_3:%.*]] = zext i8 [[TMP49]] to i32
+; CHECK-NEXT:    [[SUB_3:%.*]] = sub nsw i32 [[CONV_3]], [[CONV2_3]]
+; CHECK-NEXT:    [[ARRAYIDX3_3:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR_2]], i64 4
+; CHECK-NEXT:    [[TMP50:%.*]] = load i8, i8* [[ARRAYIDX3_3]], align 1
+; CHECK-NEXT:    [[CONV4_3:%.*]] = zext i8 [[TMP50]] to i32
+; CHECK-NEXT:    [[ARRAYIDX5_3:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64_2]], i64 4
+; CHECK-NEXT:    [[TMP51:%.*]] = load i8, i8* [[ARRAYIDX5_3]], align 1
+; CHECK-NEXT:    [[CONV6_3:%.*]] = zext i8 [[TMP51]] to i32
+; CHECK-NEXT:    [[SUB7_3:%.*]] = sub nsw i32 [[CONV4_3]], [[CONV6_3]]
+; CHECK-NEXT:    [[SHL_3:%.*]] = shl nsw i32 [[SUB7_3]], 16
+; CHECK-NEXT:    [[ADD_3:%.*]] = add nsw i32 [[SHL_3]], [[SUB_3]]
+; CHECK-NEXT:    [[ARRAYIDX8_3:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR_2]], i64 1
+; CHECK-NEXT:    [[TMP52:%.*]] = load i8, i8* [[ARRAYIDX8_3]], align 1
+; CHECK-NEXT:    [[CONV9_3:%.*]] = zext i8 [[TMP52]] to i32
+; CHECK-NEXT:    [[ARRAYIDX10_3:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64_2]], i64 1
+; CHECK-NEXT:    [[TMP53:%.*]] = load i8, i8* [[ARRAYIDX10_3]], align 1
+; CHECK-NEXT:    [[CONV11_3:%.*]] = zext i8 [[TMP53]] to i32
+; CHECK-NEXT:    [[SUB12_3:%.*]] = sub nsw i32 [[CONV9_3]], [[CONV11_3]]
+; CHECK-NEXT:    [[ARRAYIDX13_3:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR_2]], i64 5
+; CHECK-NEXT:    [[TMP54:%.*]] = load i8, i8* [[ARRAYIDX13_3]], align 1
+; CHECK-NEXT:    [[CONV14_3:%.*]] = zext i8 [[TMP54]] to i32
+; CHECK-NEXT:    [[ARRAYIDX15_3:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64_2]], i64 5
+; CHECK-NEXT:    [[TMP55:%.*]] = load i8, i8* [[ARRAYIDX15_3]], align 1
+; CHECK-NEXT:    [[CONV16_3:%.*]] = zext i8 [[TMP55]] to i32
+; CHECK-NEXT:    [[SUB17_3:%.*]] = sub nsw i32 [[CONV14_3]], [[CONV16_3]]
+; CHECK-NEXT:    [[SHL18_3:%.*]] = shl nsw i32 [[SUB17_3]], 16
+; CHECK-NEXT:    [[ADD19_3:%.*]] = add nsw i32 [[SHL18_3]], [[SUB12_3]]
+; CHECK-NEXT:    [[ARRAYIDX20_3:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR_2]], i64 2
+; CHECK-NEXT:    [[TMP56:%.*]] = load i8, i8* [[ARRAYIDX20_3]], align 1
+; CHECK-NEXT:    [[CONV21_3:%.*]] = zext i8 [[TMP56]] to i32
+; CHECK-NEXT:    [[ARRAYIDX22_3:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64_2]], i64 2
+; CHECK-NEXT:    [[TMP57:%.*]] = load i8, i8* [[ARRAYIDX22_3]], align 1
+; CHECK-NEXT:    [[CONV23_3:%.*]] = zext i8 [[TMP57]] to i32
+; CHECK-NEXT:    [[SUB24_3:%.*]] = sub nsw i32 [[CONV21_3]], [[CONV23_3]]
+; CHECK-NEXT:    [[ARRAYIDX25_3:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR_2]], i64 6
+; CHECK-NEXT:    [[TMP58:%.*]] = load i8, i8* [[ARRAYIDX25_3]], align 1
+; CHECK-NEXT:    [[CONV26_3:%.*]] = zext i8 [[TMP58]] to i32
+; CHECK-NEXT:    [[ARRAYIDX27_3:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64_2]], i64 6
+; CHECK-NEXT:    [[TMP59:%.*]] = load i8, i8* [[ARRAYIDX27_3]], align 1
+; CHECK-NEXT:    [[CONV28_3:%.*]] = zext i8 [[TMP59]] to i32
+; CHECK-NEXT:    [[SUB29_3:%.*]] = sub nsw i32 [[CONV26_3]], [[CONV28_3]]
+; CHECK-NEXT:    [[SHL30_3:%.*]] = shl nsw i32 [[SUB29_3]], 16
+; CHECK-NEXT:    [[ADD31_3:%.*]] = add nsw i32 [[SHL30_3]], [[SUB24_3]]
+; CHECK-NEXT:    [[ARRAYIDX32_3:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR_2]], i64 3
+; CHECK-NEXT:    [[TMP60:%.*]] = load i8, i8* [[ARRAYIDX32_3]], align 1
+; CHECK-NEXT:    [[CONV33_3:%.*]] = zext i8 [[TMP60]] to i32
+; CHECK-NEXT:    [[ARRAYIDX34_3:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64_2]], i64 3
+; CHECK-NEXT:    [[TMP61:%.*]] = load i8, i8* [[ARRAYIDX34_3]], align 1
+; CHECK-NEXT:    [[CONV35_3:%.*]] = zext i8 [[TMP61]] to i32
+; CHECK-NEXT:    [[SUB36_3:%.*]] = sub nsw i32 [[CONV33_3]], [[CONV35_3]]
+; CHECK-NEXT:    [[ARRAYIDX37_3:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR_2]], i64 7
+; CHECK-NEXT:    [[TMP62:%.*]] = load i8, i8* [[ARRAYIDX37_3]], align 1
+; CHECK-NEXT:    [[CONV38_3:%.*]] = zext i8 [[TMP62]] to i32
+; CHECK-NEXT:    [[ARRAYIDX39_3:%.*]] = getelementptr inbounds i8, i8* [[ADD_PTR64_2]], i64 7
+; CHECK-NEXT:    [[TMP63:%.*]] = load i8, i8* [[ARRAYIDX39_3]], align 1
+; CHECK-NEXT:    [[CONV40_3:%.*]] = zext i8 [[TMP63]] to i32
+; CHECK-NEXT:    [[SUB41_3:%.*]] = sub nsw i32 [[CONV38_3]], [[CONV40_3]]
+; CHECK-NEXT:    [[SHL42_3:%.*]] = shl nsw i32 [[SUB41_3]], 16
+; CHECK-NEXT:    [[ADD43_3:%.*]] = add nsw i32 [[SHL42_3]], [[SUB36_3]]
+; CHECK-NEXT:    [[ADD44_3:%.*]] = add nsw i32 [[ADD19_3]], [[ADD_3]]
+; CHECK-NEXT:    [[SUB45_3:%.*]] = sub nsw i32 [[ADD_3]], [[ADD19_3]]
+; CHECK-NEXT:    [[ADD46_3:%.*]] = add nsw i32 [[ADD43_3]], [[ADD31_3]]
+; CHECK-NEXT:    [[SUB47_3:%.*]] = sub nsw i32 [[ADD31_3]], [[ADD43_3]]
+; CHECK-NEXT:    [[ADD48_3:%.*]] = add nsw i32 [[ADD46_3]], [[ADD44_3]]
+; CHECK-NEXT:    [[SUB51_3:%.*]] = sub nsw i32 [[ADD44_3]], [[ADD46_3]]
+; CHECK-NEXT:    [[ADD55_3:%.*]] = add nsw i32 [[SUB47_3]], [[SUB45_3]]
+; CHECK-NEXT:    [[SUB59_3:%.*]] = sub nsw i32 [[SUB45_3]], [[SUB47_3]]
+; CHECK-NEXT:    [[ADD78:%.*]] = add nsw i32 [[ADD48_1]], [[ADD48]]
+; CHECK-NEXT:    [[SUB86:%.*]] = sub nsw i32 [[ADD48]], [[ADD48_1]]
+; CHECK-NEXT:    [[ADD94:%.*]] = add nsw i32 [[ADD48_3]], [[ADD48_2]]
+; CHECK-NEXT:    [[SUB102:%.*]] = sub nsw i32 [[ADD48_2]], [[ADD48_3]]
+; CHECK-NEXT:    [[ADD103:%.*]] = add nsw i32 [[ADD94]], [[ADD78]]
+; CHECK-NEXT:    [[SUB104:%.*]] = sub nsw i32 [[ADD78]], [[ADD94]]
+; CHECK-NEXT:    [[ADD105:%.*]] = add nsw i32 [[SUB102]], [[SUB86]]
+; CHECK-NEXT:    [[SUB106:%.*]] = sub nsw i32 [[SUB86]], [[SUB102]]
+; CHECK-NEXT:    [[SHR_I:%.*]] = lshr i32 [[ADD103]], 15
+; CHECK-NEXT:    [[AND_I:%.*]] = and i32 [[SHR_I]], 65537
+; CHECK-NEXT:    [[MUL_I:%.*]] = mul nuw i32 [[AND_I]], 65535
+; CHECK-NEXT:    [[ADD_I:%.*]] = add i32 [[MUL_I]], [[ADD103]]
+; CHECK-NEXT:    [[XOR_I:%.*]] = xor i32 [[ADD_I]], [[MUL_I]]
+; CHECK-NEXT:    [[SHR_I184:%.*]] = lshr i32 [[ADD105]], 15
+; CHECK-NEXT:    [[AND_I185:%.*]] = and i32 [[SHR_I184]], 65537
+; CHECK-NEXT:    [[MUL_I186:%.*]] = mul nuw i32 [[AND_I185]], 65535
+; CHECK-NEXT:    [[ADD_I187:%.*]] = add i32 [[MUL_I186]], [[ADD105]]
+; CHECK-NEXT:    [[XOR_I188:%.*]] = xor i32 [[ADD_I187]], [[MUL_I186]]
+; CHECK-NEXT:    [[SHR_I189:%.*]] = lshr i32 [[SUB104]], 15
+; CHECK-NEXT:    [[AND_I190:%.*]] = and i32 [[SHR_I189]], 65537
+; CHECK-NEXT:    [[MUL_I191:%.*]] = mul nuw i32 [[AND_I190]], 65535
+; CHECK-NEXT:    [[ADD_I192:%.*]] = add i32 [[MUL_I191]], [[SUB104]]
+; CHECK-NEXT:    [[XOR_I193:%.*]] = xor i32 [[ADD_I192]], [[MUL_I191]]
+; CHECK-NEXT:    [[SHR_I194:%.*]] = lshr i32 [[SUB106]], 15
+; CHECK-NEXT:    [[AND_I195:%.*]] = and i32 [[SHR_I194]], 65537
+; CHECK-NEXT:    [[MUL_I196:%.*]] = mul nuw i32 [[AND_I195]], 65535
+; CHECK-NEXT:    [[ADD_I197:%.*]] = add i32 [[MUL_I196]], [[SUB106]]
+; CHECK-NEXT:    [[XOR_I198:%.*]] = xor i32 [[ADD_I197]], [[MUL_I196]]
+; CHECK-NEXT:    [[ADD110:%.*]] = add i32 [[XOR_I188]], [[XOR_I]]
+; CHECK-NEXT:    [[ADD112:%.*]] = add i32 [[ADD110]], [[XOR_I193]]
+; CHECK-NEXT:    [[ADD113:%.*]] = add i32 [[ADD112]], [[XOR_I198]]
+; CHECK-NEXT:    [[ADD78_1:%.*]] = add nsw i32 [[ADD55_1]], [[ADD55]]
+; CHECK-NEXT:    [[SUB86_1:%.*]] = sub nsw i32 [[ADD55]], [[ADD55_1]]
+; CHECK-NEXT:    [[ADD94_1:%.*]] = add nsw i32 [[ADD55_3]], [[ADD55_2]]
+; CHECK-NEXT:    [[SUB102_1:%.*]] = sub nsw i32 [[ADD55_2]], [[ADD55_3]]
+; CHECK-NEXT:    [[ADD103_1:%.*]] = add nsw i32 [[ADD94_1]], [[ADD78_1]]
+; CHECK-NEXT:    [[SUB104_1:%.*]] = sub nsw i32 [[ADD78_1]], [[ADD94_1]]
+; CHECK-NEXT:    [[ADD105_1:%.*]] = add nsw i32 [[SUB102_1]], [[SUB86_1]]
+; CHECK-NEXT:    [[SUB106_1:%.*]] = sub nsw i32 [[SUB86_1]], [[SUB102_1]]
+; CHECK-NEXT:    [[SHR_I_1:%.*]] = lshr i32 [[ADD103_1]], 15
+; CHECK-NEXT:    [[AND_I_1:%.*]] = and i32 [[SHR_I_1]], 65537
+; CHECK-NEXT:    [[MUL_I_1:%.*]] = mul nuw i32 [[AND_I_1]], 65535
+; CHECK-NEXT:    [[ADD_I_1:%.*]] = add i32 [[MUL_I_1]], [[ADD103_1]]
+; CHECK-NEXT:    [[XOR_I_1:%.*]] = xor i32 [[ADD_I_1]], [[MUL_I_1]]
+; CHECK-NEXT:    [[SHR_I184_1:%.*]] = lshr i32 [[ADD105_1]], 15
+; CHECK-NEXT:    [[AND_I185_1:%.*]] = and i32 [[SHR_I184_1]], 65537
+; CHECK-NEXT:    [[MUL_I186_1:%.*]] = mul nuw i32 [[AND_I185_1]], 65535
+; CHECK-NEXT:    [[ADD_I187_1:%.*]] = add i32 [[MUL_I186_1]], [[ADD105_1]]
+; CHECK-NEXT:    [[XOR_I188_1:%.*]] = xor i32 [[ADD_I187_1]], [[MUL_I186_1]]
+; CHECK-NEXT:    [[SHR_I189_1:%.*]] = lshr i32 [[SUB104_1]], 15
+; CHECK-NEXT:    [[AND_I190_1:%.*]] = and i32 [[SHR_I189_1]], 65537
+; CHECK-NEXT:    [[MUL_I191_1:%.*]] = mul nuw i32 [[AND_I190_1]], 65535
+; CHECK-NEXT:    [[ADD_I192_1:%.*]] = add i32 [[MUL_I191_1]], [[SUB104_1]]
+; CHECK-NEXT:    [[XOR_I193_1:%.*]] = xor i32 [[ADD_I192_1]], [[MUL_I191_1]]
+; CHECK-NEXT:    [[SHR_I194_1:%.*]] = lshr i32 [[SUB106_1]], 15
+; CHECK-NEXT:    [[AND_I195_1:%.*]] = and i32 [[SHR_I194_1]], 65537
+; CHECK-NEXT:    [[MUL_I196_1:%.*]] = mul nuw i32 [[AND_I195_1]], 65535
+; CHECK-NEXT:    [[ADD_I197_1:%.*]] = add i32 [[MUL_I196_1]], [[SUB106_1]]
+; CHECK-NEXT:    [[XOR_I198_1:%.*]] = xor i32 [[ADD_I197_1]], [[MUL_I196_1]]
+; CHECK-NEXT:    [[ADD108_1:%.*]] = add i32 [[XOR_I188_1]], [[ADD113]]
+; CHECK-NEXT:    [[ADD110_1:%.*]] = add i32 [[ADD108_1]], [[XOR_I_1]]
+; CHECK-NEXT:    [[ADD112_1:%.*]] = add i32 [[ADD110_1]], [[XOR_I193_1]]
+; CHECK-NEXT:    [[ADD113_1:%.*]] = add i32 [[ADD112_1]], [[XOR_I198_1]]
+; CHECK-NEXT:    [[ADD78_2:%.*]] = add nsw i32 [[SUB51_1]], [[SUB51]]
+; CHECK-NEXT:    [[SUB86_2:%.*]] = sub nsw i32 [[SUB51]], [[SUB51_1]]
+; CHECK-NEXT:    [[ADD94_2:%.*]] = add nsw i32 [[SUB51_3]], [[SUB51_2]]
+; CHECK-NEXT:    [[SUB102_2:%.*]] = sub nsw i32 [[SUB51_2]], [[SUB51_3]]
+; CHECK-NEXT:    [[ADD103_2:%.*]] = add nsw i32 [[ADD94_2]], [[ADD78_2]]
+; CHECK-NEXT:    [[SUB104_2:%.*]] = sub nsw i32 [[ADD78_2]], [[ADD94_2]]
+; CHECK-NEXT:    [[ADD105_2:%.*]] = add nsw i32 [[SUB102_2]], [[SUB86_2]]
+; CHECK-NEXT:    [[SUB106_2:%.*]] = sub nsw i32 [[SUB86_2]], [[SUB102_2]]
+; CHECK-NEXT:    [[SHR_I_2:%.*]] = lshr i32 [[ADD103_2]], 15
+; CHECK-NEXT:    [[AND_I_2:%.*]] = and i32 [[SHR_I_2]], 65537
+; CHECK-NEXT:    [[MUL_I_2:%.*]] = mul nuw i32 [[AND_I_2]], 65535
+; CHECK-NEXT:    [[ADD_I_2:%.*]] = add i32 [[MUL_I_2]], [[ADD103_2]]
+; CHECK-NEXT:    [[XOR_I_2:%.*]] = xor i32 [[ADD_I_2]], [[MUL_I_2]]
+; CHECK-NEXT:    [[SHR_I184_2:%.*]] = lshr i32 [[ADD105_2]], 15
+; CHECK-NEXT:    [[AND_I185_2:%.*]] = and i32 [[SHR_I184_2]], 65537
+; CHECK-NEXT:    [[MUL_I186_2:%.*]] = mul nuw i32 [[AND_I185_2]], 65535
+; CHECK-NEXT:    [[ADD_I187_2:%.*]] = add i32 [[MUL_I186_2]], [[ADD105_2]]
+; CHECK-NEXT:    [[XOR_I188_2:%.*]] = xor i32 [[ADD_I187_2]], [[MUL_I186_2]]
+; CHECK-NEXT:    [[SHR_I189_2:%.*]] = lshr i32 [[SUB104_2]], 15
+; CHECK-NEXT:    [[AND_I190_2:%.*]] = and i32 [[SHR_I189_2]], 65537
+; CHECK-NEXT:    [[MUL_I191_2:%.*]] = mul nuw i32 [[AND_I190_2]], 65535
+; CHECK-NEXT:    [[ADD_I192_2:%.*]] = add i32 [[MUL_I191_2]], [[SUB104_2]]
+; CHECK-NEXT:    [[XOR_I193_2:%.*]] = xor i32 [[ADD_I192_2]], [[MUL_I191_2]]
+; CHECK-NEXT:    [[SHR_I194_2:%.*]] = lshr i32 [[SUB106_2]], 15
+; CHECK-NEXT:    [[AND_I195_2:%.*]] = and i32 [[SHR_I194_2]], 65537
+; CHECK-NEXT:    [[MUL_I196_2:%.*]] = mul nuw i32 [[AND_I195_2]], 65535
+; CHECK-NEXT:    [[ADD_I197_2:%.*]] = add i32 [[MUL_I196_2]], [[SUB106_2]]
+; CHECK-NEXT:    [[XOR_I198_2:%.*]] = xor i32 [[ADD_I197_2]], [[MUL_I196_2]]
+; CHECK-NEXT:    [[ADD108_2:%.*]] = add i32 [[XOR_I188_2]], [[ADD113_1]]
+; CHECK-NEXT:    [[ADD110_2:%.*]] = add i32 [[ADD108_2]], [[XOR_I_2]]
+; CHECK-NEXT:    [[ADD112_2:%.*]] = add i32 [[ADD110_2]], [[XOR_I193_2]]
+; CHECK-NEXT:    [[ADD113_2:%.*]] = add i32 [[ADD112_2]], [[XOR_I198_2]]
+; CHECK-NEXT:    [[ADD78_3:%.*]] = add nsw i32 [[SUB59_1]], [[SUB59]]
+; CHECK-NEXT:    [[SUB86_3:%.*]] = sub nsw i32 [[SUB59]], [[SUB59_1]]
+; CHECK-NEXT:    [[ADD94_3:%.*]] = add nsw i32 [[SUB59_3]], [[SUB59_2]]
+; CHECK-NEXT:    [[SUB102_3:%.*]] = sub nsw i32 [[SUB59_2]], [[SUB59_3]]
+; CHECK-NEXT:    [[ADD103_3:%.*]] = add nsw i32 [[ADD94_3]], [[ADD78_3]]
+; CHECK-NEXT:    [[SUB104_3:%.*]] = sub nsw i32 [[ADD78_3]], [[ADD94_3]]
+; CHECK-NEXT:    [[ADD105_3:%.*]] = add nsw i32 [[SUB102_3]], [[SUB86_3]]
+; CHECK-NEXT:    [[SUB106_3:%.*]] = sub nsw i32 [[SUB86_3]], [[SUB102_3]]
+; CHECK-NEXT:    [[SHR_I_3:%.*]] = lshr i32 [[ADD103_3]], 15
+; CHECK-NEXT:    [[AND_I_3:%.*]] = and i32 [[SHR_I_3]], 65537
+; CHECK-NEXT:    [[MUL_I_3:%.*]] = mul nuw i32 [[AND_I_3]], 65535
+; CHECK-NEXT:    [[ADD_I_3:%.*]] = add i32 [[MUL_I_3]], [[ADD103_3]]
+; CHECK-NEXT:    [[XOR_I_3:%.*]] = xor i32 [[ADD_I_3]], [[MUL_I_3]]
+; CHECK-NEXT:    [[SHR_I184_3:%.*]] = lshr i32 [[ADD105_3]], 15
+; CHECK-NEXT:    [[AND_I185_3:%.*]] = and i32 [[SHR_I184_3]], 65537
+; CHECK-NEXT:    [[MUL_I186_3:%.*]] = mul nuw i32 [[AND_I185_3]], 65535
+; CHECK-NEXT:    [[ADD_I187_3:%.*]] = add i32 [[MUL_I186_3]], [[ADD105_3]]
+; CHECK-NEXT:    [[XOR_I188_3:%.*]] = xor i32 [[ADD_I187_3]], [[MUL_I186_3]]
+; CHECK-NEXT:    [[SHR_I189_3:%.*]] = lshr i32 [[SUB104_3]], 15
+; CHECK-NEXT:    [[AND_I190_3:%.*]] = and i32 [[SHR_I189_3]], 65537
+; CHECK-NEXT:    [[MUL_I191_3:%.*]] = mul nuw i32 [[AND_I190_3]], 65535
+; CHECK-NEXT:    [[ADD_I192_3:%.*]] = add i32 [[MUL_I191_3]], [[SUB104_3]]
+; CHECK-NEXT:    [[XOR_I193_3:%.*]] = xor i32 [[ADD_I192_3]], [[MUL_I191_3]]
+; CHECK-NEXT:    [[SHR_I194_3:%.*]] = lshr i32 [[SUB106_3]], 15
+; CHECK-NEXT:    [[AND_I195_3:%.*]] = and i32 [[SHR_I194_3]], 65537
+; CHECK-NEXT:    [[MUL_I196_3:%.*]] = mul nuw i32 [[AND_I195_3]], 65535
+; CHECK-NEXT:    [[ADD_I197_3:%.*]] = add i32 [[MUL_I196_3]], [[SUB106_3]]
+; CHECK-NEXT:    [[XOR_I198_3:%.*]] = xor i32 [[ADD_I197_3]], [[MUL_I196_3]]
+; CHECK-NEXT:    [[ADD108_3:%.*]] = add i32 [[XOR_I188_3]], [[ADD113_2]]
+; CHECK-NEXT:    [[ADD110_3:%.*]] = add i32 [[ADD108_3]], [[XOR_I_3]]
+; CHECK-NEXT:    [[ADD112_3:%.*]] = add i32 [[ADD110_3]], [[XOR_I193_3]]
+; CHECK-NEXT:    [[ADD113_3:%.*]] = add i32 [[ADD112_3]], [[XOR_I198_3]]
+; CHECK-NEXT:    [[CONV118:%.*]] = and i32 [[ADD113_3]], 65535
+; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 [[ADD113_3]], 16
+; CHECK-NEXT:    [[ADD119:%.*]] = add nuw nsw i32 [[CONV118]], [[SHR]]
+; CHECK-NEXT:    [[SHR120:%.*]] = lshr i32 [[ADD119]], 1
+; CHECK-NEXT:    ret i32 [[SHR120]]
+;
+entry:
+  %idx.ext = sext i32 %st1 to i64
+  %idx.ext63 = sext i32 %st2 to i64
+  %0 = load i8, i8* %p1, align 1
+  %conv = zext i8 %0 to i32
+  %1 = load i8, i8* %p2, align 1
+  %conv2 = zext i8 %1 to i32
+  %sub = sub nsw i32 %conv, %conv2
+  %arrayidx3 = getelementptr inbounds i8, i8* %p1, i64 4
+  %2 = load i8, i8* %arrayidx3, align 1
+  %conv4 = zext i8 %2 to i32
+  %arrayidx5 = getelementptr inbounds i8, i8* %p2, i64 4
+  %3 = load i8, i8* %arrayidx5, align 1
+  %conv6 = zext i8 %3 to i32
+  %sub7 = sub nsw i32 %conv4, %conv6
+  %shl = shl nsw i32 %sub7, 16
+  %add = add nsw i32 %shl, %sub
+  %arrayidx8 = getelementptr inbounds i8, i8* %p1, i64 1
+  %4 = load i8, i8* %arrayidx8, align 1
+  %conv9 = zext i8 %4 to i32
+  %arrayidx10 = getelementptr inbounds i8, i8* %p2, i64 1
+  %5 = load i8, i8* %arrayidx10, align 1
+  %conv11 = zext i8 %5 to i32
+  %sub12 = sub nsw i32 %conv9, %conv11
+  %arrayidx13 = getelementptr inbounds i8, i8* %p1, i64 5
+  %6 = load i8, i8* %arrayidx13, align 1
+  %conv14 = zext i8 %6 to i32
+  %arrayidx15 = getelementptr inbounds i8, i8* %p2, i64 5
+  %7 = load i8, i8* %arrayidx15, align 1
+  %conv16 = zext i8 %7 to i32
+  %sub17 = sub nsw i32 %conv14, %conv16
+  %shl18 = shl nsw i32 %sub17, 16
+  %add19 = add nsw i32 %shl18, %sub12
+  %arrayidx20 = getelementptr inbounds i8, i8* %p1, i64 2
+  %8 = load i8, i8* %arrayidx20, align 1
+  %conv21 = zext i8 %8 to i32
+  %arrayidx22 = getelementptr inbounds i8, i8* %p2, i64 2
+  %9 = load i8, i8* %arrayidx22, align 1
+  %conv23 = zext i8 %9 to i32
+  %sub24 = sub nsw i32 %conv21, %conv23
+  %arrayidx25 = getelementptr inbounds i8, i8* %p1, i64 6
+  %10 = load i8, i8* %arrayidx25, align 1
+  %conv26 = zext i8 %10 to i32
+  %arrayidx27 = getelementptr inbounds i8, i8* %p2, i64 6
+  %11 = load i8, i8* %arrayidx27, align 1
+  %conv28 = zext i8 %11 to i32
+  %sub29 = sub nsw i32 %conv26, %conv28
+  %shl30 = shl nsw i32 %sub29, 16
+  %add31 = add nsw i32 %shl30, %sub24
+  %arrayidx32 = getelementptr inbounds i8, i8* %p1, i64 3
+  %12 = load i8, i8* %arrayidx32, align 1
+  %conv33 = zext i8 %12 to i32
+  %arrayidx34 = getelementptr inbounds i8, i8* %p2, i64 3
+  %13 = load i8, i8* %arrayidx34, align 1
+  %conv35 = zext i8 %13 to i32
+  %sub36 = sub nsw i32 %conv33, %conv35
+  %arrayidx37 = getelementptr inbounds i8, i8* %p1, i64 7
+  %14 = load i8, i8* %arrayidx37, align 1
+  %conv38 = zext i8 %14 to i32
+  %arrayidx39 = getelementptr inbounds i8, i8* %p2, i64 7
+  %15 = load i8, i8* %arrayidx39, align 1
+  %conv40 = zext i8 %15 to i32
+  %sub41 = sub nsw i32 %conv38, %conv40
+  %shl42 = shl nsw i32 %sub41, 16
+  %add43 = add nsw i32 %shl42, %sub36
+  %add44 = add nsw i32 %add19, %add
+  %sub45 = sub nsw i32 %add, %add19
+  %add46 = add nsw i32 %add43, %add31
+  %sub47 = sub nsw i32 %add31, %add43
+  %add48 = add nsw i32 %add46, %add44
+  %sub51 = sub nsw i32 %add44, %add46
+  %add55 = add nsw i32 %sub47, %sub45
+  %sub59 = sub nsw i32 %sub45, %sub47
+  %add.ptr = getelementptr inbounds i8, i8* %p1, i64 %idx.ext
+  %add.ptr64 = getelementptr inbounds i8, i8* %p2, i64 %idx.ext63
+  %16 = load i8, i8* %add.ptr, align 1
+  %conv.1 = zext i8 %16 to i32
+  %17 = load i8, i8* %add.ptr64, align 1
+  %conv2.1 = zext i8 %17 to i32
+  %sub.1 = sub nsw i32 %conv.1, %conv2.1
+  %arrayidx3.1 = getelementptr inbounds i8, i8* %add.ptr, i64 4
+  %18 = load i8, i8* %arrayidx3.1, align 1
+  %conv4.1 = zext i8 %18 to i32
+  %arrayidx5.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 4
+  %19 = load i8, i8* %arrayidx5.1, align 1
+  %conv6.1 = zext i8 %19 to i32
+  %sub7.1 = sub nsw i32 %conv4.1, %conv6.1
+  %shl.1 = shl nsw i32 %sub7.1, 16
+  %add.1 = add nsw i32 %shl.1, %sub.1
+  %arrayidx8.1 = getelementptr inbounds i8, i8* %add.ptr, i64 1
+  %20 = load i8, i8* %arrayidx8.1, align 1
+  %conv9.1 = zext i8 %20 to i32
+  %arrayidx10.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 1
+  %21 = load i8, i8* %arrayidx10.1, align 1
+  %conv11.1 = zext i8 %21 to i32
+  %sub12.1 = sub nsw i32 %conv9.1, %conv11.1
+  %arrayidx13.1 = getelementptr inbounds i8, i8* %add.ptr, i64 5
+  %22 = load i8, i8* %arrayidx13.1, align 1
+  %conv14.1 = zext i8 %22 to i32
+  %arrayidx15.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 5
+  %23 = load i8, i8* %arrayidx15.1, align 1
+  %conv16.1 = zext i8 %23 to i32
+  %sub17.1 = sub nsw i32 %conv14.1, %conv16.1
+  %shl18.1 = shl nsw i32 %sub17.1, 16
+  %add19.1 = add nsw i32 %shl18.1, %sub12.1
+  %arrayidx20.1 = getelementptr inbounds i8, i8* %add.ptr, i64 2
+  %24 = load i8, i8* %arrayidx20.1, align 1
+  %conv21.1 = zext i8 %24 to i32
+  %arrayidx22.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 2
+  %25 = load i8, i8* %arrayidx22.1, align 1
+  %conv23.1 = zext i8 %25 to i32
+  %sub24.1 = sub nsw i32 %conv21.1, %conv23.1
+  %arrayidx25.1 = getelementptr inbounds i8, i8* %add.ptr, i64 6
+  %26 = load i8, i8* %arrayidx25.1, align 1
+  %conv26.1 = zext i8 %26 to i32
+  %arrayidx27.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 6
+  %27 = load i8, i8* %arrayidx27.1, align 1
+  %conv28.1 = zext i8 %27 to i32
+  %sub29.1 = sub nsw i32 %conv26.1, %conv28.1
+  %shl30.1 = shl nsw i32 %sub29.1, 16
+  %add31.1 = add nsw i32 %shl30.1, %sub24.1
+  %arrayidx32.1 = getelementptr inbounds i8, i8* %add.ptr, i64 3
+  %28 = load i8, i8* %arrayidx32.1, align 1
+  %conv33.1 = zext i8 %28 to i32
+  %arrayidx34.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 3
+  %29 = load i8, i8* %arrayidx34.1, align 1
+  %conv35.1 = zext i8 %29 to i32
+  %sub36.1 = sub nsw i32 %conv33.1, %conv35.1
+  %arrayidx37.1 = getelementptr inbounds i8, i8* %add.ptr, i64 7
+  %30 = load i8, i8* %arrayidx37.1, align 1
+  %conv38.1 = zext i8 %30 to i32
+  %arrayidx39.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 7
+  %31 = load i8, i8* %arrayidx39.1, align 1
+  %conv40.1 = zext i8 %31 to i32
+  %sub41.1 = sub nsw i32 %conv38.1, %conv40.1
+  %shl42.1 = shl nsw i32 %sub41.1, 16
+  %add43.1 = add nsw i32 %shl42.1, %sub36.1
+  %add44.1 = add nsw i32 %add19.1, %add.1
+  %sub45.1 = sub nsw i32 %add.1, %add19.1
+  %add46.1 = add nsw i32 %add43.1, %add31.1
+  %sub47.1 = sub nsw i32 %add31.1, %add43.1
+  %add48.1 = add nsw i32 %add46.1, %add44.1
+  %sub51.1 = sub nsw i32 %add44.1, %add46.1
+  %add55.1 = add nsw i32 %sub47.1, %sub45.1
+  %sub59.1 = sub nsw i32 %sub45.1, %sub47.1
+  %add.ptr.1 = getelementptr inbounds i8, i8* %add.ptr, i64 %idx.ext
+  %add.ptr64.1 = getelementptr inbounds i8, i8* %add.ptr64, i64 %idx.ext63
+  %32 = load i8, i8* %add.ptr.1, align 1
+  %conv.2 = zext i8 %32 to i32
+  %33 = load i8, i8* %add.ptr64.1, align 1
+  %conv2.2 = zext i8 %33 to i32
+  %sub.2 = sub nsw i32 %conv.2, %conv2.2
+  %arrayidx3.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 4
+  %34 = load i8, i8* %arrayidx3.2, align 1
+  %conv4.2 = zext i8 %34 to i32
+  %arrayidx5.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 4
+  %35 = load i8, i8* %arrayidx5.2, align 1
+  %conv6.2 = zext i8 %35 to i32
+  %sub7.2 = sub nsw i32 %conv4.2, %conv6.2
+  %shl.2 = shl nsw i32 %sub7.2, 16
+  %add.2 = add nsw i32 %shl.2, %sub.2
+  %arrayidx8.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 1
+  %36 = load i8, i8* %arrayidx8.2, align 1
+  %conv9.2 = zext i8 %36 to i32
+  %arrayidx10.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 1
+  %37 = load i8, i8* %arrayidx10.2, align 1
+  %conv11.2 = zext i8 %37 to i32
+  %sub12.2 = sub nsw i32 %conv9.2, %conv11.2
+  %arrayidx13.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 5
+  %38 = load i8, i8* %arrayidx13.2, align 1
+  %conv14.2 = zext i8 %38 to i32
+  %arrayidx15.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 5
+  %39 = load i8, i8* %arrayidx15.2, align 1
+  %conv16.2 = zext i8 %39 to i32
+  %sub17.2 = sub nsw i32 %conv14.2, %conv16.2
+  %shl18.2 = shl nsw i32 %sub17.2, 16
+  %add19.2 = add nsw i32 %shl18.2, %sub12.2
+  %arrayidx20.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 2
+  %40 = load i8, i8* %arrayidx20.2, align 1
+  %conv21.2 = zext i8 %40 to i32
+  %arrayidx22.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 2
+  %41 = load i8, i8* %arrayidx22.2, align 1
+  %conv23.2 = zext i8 %41 to i32
+  %sub24.2 = sub nsw i32 %conv21.2, %conv23.2
+  %arrayidx25.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 6
+  %42 = load i8, i8* %arrayidx25.2, align 1
+  %conv26.2 = zext i8 %42 to i32
+  %arrayidx27.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 6
+  %43 = load i8, i8* %arrayidx27.2, align 1
+  %conv28.2 = zext i8 %43 to i32
+  %sub29.2 = sub nsw i32 %conv26.2, %conv28.2
+  %shl30.2 = shl nsw i32 %sub29.2, 16
+  %add31.2 = add nsw i32 %shl30.2, %sub24.2
+  %arrayidx32.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 3
+  %44 = load i8, i8* %arrayidx32.2, align 1
+  %conv33.2 = zext i8 %44 to i32
+  %arrayidx34.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 3
+  %45 = load i8, i8* %arrayidx34.2, align 1
+  %conv35.2 = zext i8 %45 to i32
+  %sub36.2 = sub nsw i32 %conv33.2, %conv35.2
+  %arrayidx37.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 7
+  %46 = load i8, i8* %arrayidx37.2, align 1
+  %conv38.2 = zext i8 %46 to i32
+  %arrayidx39.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 7
+  %47 = load i8, i8* %arrayidx39.2, align 1
+  %conv40.2 = zext i8 %47 to i32
+  %sub41.2 = sub nsw i32 %conv38.2, %conv40.2
+  %shl42.2 = shl nsw i32 %sub41.2, 16
+  %add43.2 = add nsw i32 %shl42.2, %sub36.2
+  %add44.2 = add nsw i32 %add19.2, %add.2
+  %sub45.2 = sub nsw i32 %add.2, %add19.2
+  %add46.2 = add nsw i32 %add43.2, %add31.2
+  %sub47.2 = sub nsw i32 %add31.2, %add43.2
+  %add48.2 = add nsw i32 %add46.2, %add44.2
+  %sub51.2 = sub nsw i32 %add44.2, %add46.2
+  %add55.2 = add nsw i32 %sub47.2, %sub45.2
+  %sub59.2 = sub nsw i32 %sub45.2, %sub47.2
+  %add.ptr.2 = getelementptr inbounds i8, i8* %add.ptr.1, i64 %idx.ext
+  %add.ptr64.2 = getelementptr inbounds i8, i8* %add.ptr64.1, i64 %idx.ext63
+  %48 = load i8, i8* %add.ptr.2, align 1
+  %conv.3 = zext i8 %48 to i32
+  %49 = load i8, i8* %add.ptr64.2, align 1
+  %conv2.3 = zext i8 %49 to i32
+  %sub.3 = sub nsw i32 %conv.3, %conv2.3
+  %arrayidx3.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 4
+  %50 = load i8, i8* %arrayidx3.3, align 1
+  %conv4.3 = zext i8 %50 to i32
+  %arrayidx5.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 4
+  %51 = load i8, i8* %arrayidx5.3, align 1
+  %conv6.3 = zext i8 %51 to i32
+  %sub7.3 = sub nsw i32 %conv4.3, %conv6.3
+  %shl.3 = shl nsw i32 %sub7.3, 16
+  %add.3 = add nsw i32 %shl.3, %sub.3
+  %arrayidx8.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 1
+  %52 = load i8, i8* %arrayidx8.3, align 1
+  %conv9.3 = zext i8 %52 to i32
+  %arrayidx10.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 1
+  %53 = load i8, i8* %arrayidx10.3, align 1
+  %conv11.3 = zext i8 %53 to i32
+  %sub12.3 = sub nsw i32 %conv9.3, %conv11.3
+  %arrayidx13.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 5
+  %54 = load i8, i8* %arrayidx13.3, align 1
+  %conv14.3 = zext i8 %54 to i32
+  %arrayidx15.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 5
+  %55 = load i8, i8* %arrayidx15.3, align 1
+  %conv16.3 = zext i8 %55 to i32
+  %sub17.3 = sub nsw i32 %conv14.3, %conv16.3
+  %shl18.3 = shl nsw i32 %sub17.3, 16
+  %add19.3 = add nsw i32 %shl18.3, %sub12.3
+  %arrayidx20.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 2
+  %56 = load i8, i8* %arrayidx20.3, align 1
+  %conv21.3 = zext i8 %56 to i32
+  %arrayidx22.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 2
+  %57 = load i8, i8* %arrayidx22.3, align 1
+  %conv23.3 = zext i8 %57 to i32
+  %sub24.3 = sub nsw i32 %conv21.3, %conv23.3
+  %arrayidx25.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 6
+  %58 = load i8, i8* %arrayidx25.3, align 1
+  %conv26.3 = zext i8 %58 to i32
+  %arrayidx27.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 6
+  %59 = load i8, i8* %arrayidx27.3, align 1
+  %conv28.3 = zext i8 %59 to i32
+  %sub29.3 = sub nsw i32 %conv26.3, %conv28.3
+  %shl30.3 = shl nsw i32 %sub29.3, 16
+  %add31.3 = add nsw i32 %shl30.3, %sub24.3
+  %arrayidx32.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 3
+  %60 = load i8, i8* %arrayidx32.3, align 1
+  %conv33.3 = zext i8 %60 to i32
+  %arrayidx34.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 3
+  %61 = load i8, i8* %arrayidx34.3, align 1
+  %conv35.3 = zext i8 %61 to i32
+  %sub36.3 = sub nsw i32 %conv33.3, %conv35.3
+  %arrayidx37.3 = getelementptr inbounds i8, i8* %add.ptr.2, i64 7
+  %62 = load i8, i8* %arrayidx37.3, align 1
+  %conv38.3 = zext i8 %62 to i32
+  %arrayidx39.3 = getelementptr inbounds i8, i8* %add.ptr64.2, i64 7
+  %63 = load i8, i8* %arrayidx39.3, align 1
+  %conv40.3 = zext i8 %63 to i32
+  %sub41.3 = sub nsw i32 %conv38.3, %conv40.3
+  %shl42.3 = shl nsw i32 %sub41.3, 16
+  %add43.3 = add nsw i32 %shl42.3, %sub36.3
+  %add44.3 = add nsw i32 %add19.3, %add.3
+  %sub45.3 = sub nsw i32 %add.3, %add19.3
+  %add46.3 = add nsw i32 %add43.3, %add31.3
+  %sub47.3 = sub nsw i32 %add31.3, %add43.3
+  %add48.3 = add nsw i32 %add46.3, %add44.3
+  %sub51.3 = sub nsw i32 %add44.3, %add46.3
+  %add55.3 = add nsw i32 %sub47.3, %sub45.3
+  %sub59.3 = sub nsw i32 %sub45.3, %sub47.3
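+  ; Vertical stage: butterfly the first column of per-row results (add48.*) across the four rows, then reduce with packed absolute values.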
+  %add78 = add nsw i32 %add48.1, %add48
+  %sub86 = sub nsw i32 %add48, %add48.1
+  %add94 = add nsw i32 %add48.3, %add48.2
+  %sub102 = sub nsw i32 %add48.2, %add48.3
+  %add103 = add nsw i32 %add94, %add78
+  %sub104 = sub nsw i32 %add78, %add94
+  %add105 = add nsw i32 %sub102, %sub86
+  %sub106 = sub nsw i32 %sub86, %sub102
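+  ; abs2 idiom: with s = ((x >> 15) & 0x10001) * 0xffff, (x + s) ^ s yields the absolute value of each signed 16-bit half of x.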
+  %shr.i = lshr i32 %add103, 15
+  %and.i = and i32 %shr.i, 65537
+  %mul.i = mul nuw i32 %and.i, 65535
+  %add.i = add i32 %mul.i, %add103
+  %xor.i = xor i32 %add.i, %mul.i
+  %shr.i184 = lshr i32 %add105, 15
+  %and.i185 = and i32 %shr.i184, 65537
+  %mul.i186 = mul nuw i32 %and.i185, 65535
+  %add.i187 = add i32 %mul.i186, %add105
+  %xor.i188 = xor i32 %add.i187, %mul.i186
+  %shr.i189 = lshr i32 %sub104, 15
+  %and.i190 = and i32 %shr.i189, 65537
+  %mul.i191 = mul nuw i32 %and.i190, 65535
+  %add.i192 = add i32 %mul.i191, %sub104
+  %xor.i193 = xor i32 %add.i192, %mul.i191
+  %shr.i194 = lshr i32 %sub106, 15
+  %and.i195 = and i32 %shr.i194, 65537
+  %mul.i196 = mul nuw i32 %and.i195, 65535
+  %add.i197 = add i32 %mul.i196, %sub106
+  %xor.i198 = xor i32 %add.i197, %mul.i196
+  %add110 = add i32 %xor.i188, %xor.i
+  %add112 = add i32 %add110, %xor.i193
+  %add113 = add i32 %add112, %xor.i198
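+  ; Second column (add55.*): same butterfly, abs2, and accumulate.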
+  %add78.1 = add nsw i32 %add55.1, %add55
+  %sub86.1 = sub nsw i32 %add55, %add55.1
+  %add94.1 = add nsw i32 %add55.3, %add55.2
+  %sub102.1 = sub nsw i32 %add55.2, %add55.3
+  %add103.1 = add nsw i32 %add94.1, %add78.1
+  %sub104.1 = sub nsw i32 %add78.1, %add94.1
+  %add105.1 = add nsw i32 %sub102.1, %sub86.1
+  %sub106.1 = sub nsw i32 %sub86.1, %sub102.1
+  %shr.i.1 = lshr i32 %add103.1, 15
+  %and.i.1 = and i32 %shr.i.1, 65537
+  %mul.i.1 = mul nuw i32 %and.i.1, 65535
+  %add.i.1 = add i32 %mul.i.1, %add103.1
+  %xor.i.1 = xor i32 %add.i.1, %mul.i.1
+  %shr.i184.1 = lshr i32 %add105.1, 15
+  %and.i185.1 = and i32 %shr.i184.1, 65537
+  %mul.i186.1 = mul nuw i32 %and.i185.1, 65535
+  %add.i187.1 = add i32 %mul.i186.1, %add105.1
+  %xor.i188.1 = xor i32 %add.i187.1, %mul.i186.1
+  %shr.i189.1 = lshr i32 %sub104.1, 15
+  %and.i190.1 = and i32 %shr.i189.1, 65537
+  %mul.i191.1 = mul nuw i32 %and.i190.1, 65535
+  %add.i192.1 = add i32 %mul.i191.1, %sub104.1
+  %xor.i193.1 = xor i32 %add.i192.1, %mul.i191.1
+  %shr.i194.1 = lshr i32 %sub106.1, 15
+  %and.i195.1 = and i32 %shr.i194.1, 65537
+  %mul.i196.1 = mul nuw i32 %and.i195.1, 65535
+  %add.i197.1 = add i32 %mul.i196.1, %sub106.1
+  %xor.i198.1 = xor i32 %add.i197.1, %mul.i196.1
+  %add108.1 = add i32 %xor.i188.1, %add113
+  %add110.1 = add i32 %add108.1, %xor.i.1
+  %add112.1 = add i32 %add110.1, %xor.i193.1
+  %add113.1 = add i32 %add112.1, %xor.i198.1
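+  ; Third column (sub51.*).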
+  %add78.2 = add nsw i32 %sub51.1, %sub51
+  %sub86.2 = sub nsw i32 %sub51, %sub51.1
+  %add94.2 = add nsw i32 %sub51.3, %sub51.2
+  %sub102.2 = sub nsw i32 %sub51.2, %sub51.3
+  %add103.2 = add nsw i32 %add94.2, %add78.2
+  %sub104.2 = sub nsw i32 %add78.2, %add94.2
+  %add105.2 = add nsw i32 %sub102.2, %sub86.2
+  %sub106.2 = sub nsw i32 %sub86.2, %sub102.2
+  %shr.i.2 = lshr i32 %add103.2, 15
+  %and.i.2 = and i32 %shr.i.2, 65537
+  %mul.i.2 = mul nuw i32 %and.i.2, 65535
+  %add.i.2 = add i32 %mul.i.2, %add103.2
+  %xor.i.2 = xor i32 %add.i.2, %mul.i.2
+  %shr.i184.2 = lshr i32 %add105.2, 15
+  %and.i185.2 = and i32 %shr.i184.2, 65537
+  %mul.i186.2 = mul nuw i32 %and.i185.2, 65535
+  %add.i187.2 = add i32 %mul.i186.2, %add105.2
+  %xor.i188.2 = xor i32 %add.i187.2, %mul.i186.2
+  %shr.i189.2 = lshr i32 %sub104.2, 15
+  %and.i190.2 = and i32 %shr.i189.2, 65537
+  %mul.i191.2 = mul nuw i32 %and.i190.2, 65535
+  %add.i192.2 = add i32 %mul.i191.2, %sub104.2
+  %xor.i193.2 = xor i32 %add.i192.2, %mul.i191.2
+  %shr.i194.2 = lshr i32 %sub106.2, 15
+  %and.i195.2 = and i32 %shr.i194.2, 65537
+  %mul.i196.2 = mul nuw i32 %and.i195.2, 65535
+  %add.i197.2 = add i32 %mul.i196.2, %sub106.2
+  %xor.i198.2 = xor i32 %add.i197.2, %mul.i196.2
+  %add108.2 = add i32 %xor.i188.2, %add113.1
+  %add110.2 = add i32 %add108.2, %xor.i.2
+  %add112.2 = add i32 %add110.2, %xor.i193.2
+  %add113.2 = add i32 %add112.2, %xor.i198.2
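+  ; Fourth column (sub59.*).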
+  %add78.3 = add nsw i32 %sub59.1, %sub59
+  %sub86.3 = sub nsw i32 %sub59, %sub59.1
+  %add94.3 = add nsw i32 %sub59.3, %sub59.2
+  %sub102.3 = sub nsw i32 %sub59.2, %sub59.3
+  %add103.3 = add nsw i32 %add94.3, %add78.3
+  %sub104.3 = sub nsw i32 %add78.3, %add94.3
+  %add105.3 = add nsw i32 %sub102.3, %sub86.3
+  %sub106.3 = sub nsw i32 %sub86.3, %sub102.3
+  %shr.i.3 = lshr i32 %add103.3, 15
+  %and.i.3 = and i32 %shr.i.3, 65537
+  %mul.i.3 = mul nuw i32 %and.i.3, 65535
+  %add.i.3 = add i32 %mul.i.3, %add103.3
+  %xor.i.3 = xor i32 %add.i.3, %mul.i.3
+  %shr.i184.3 = lshr i32 %add105.3, 15
+  %and.i185.3 = and i32 %shr.i184.3, 65537
+  %mul.i186.3 = mul nuw i32 %and.i185.3, 65535
+  %add.i187.3 = add i32 %mul.i186.3, %add105.3
+  %xor.i188.3 = xor i32 %add.i187.3, %mul.i186.3
+  %shr.i189.3 = lshr i32 %sub104.3, 15
+  %and.i190.3 = and i32 %shr.i189.3, 65537
+  %mul.i191.3 = mul nuw i32 %and.i190.3, 65535
+  %add.i192.3 = add i32 %mul.i191.3, %sub104.3
+  %xor.i193.3 = xor i32 %add.i192.3, %mul.i191.3
+  %shr.i194.3 = lshr i32 %sub106.3, 15
+  %and.i195.3 = and i32 %shr.i194.3, 65537
+  %mul.i196.3 = mul nuw i32 %and.i195.3, 65535
+  %add.i197.3 = add i32 %mul.i196.3, %sub106.3
+  %xor.i198.3 = xor i32 %add.i197.3, %mul.i196.3
+  %add108.3 = add i32 %xor.i188.3, %add113.2
+  %add110.3 = add i32 %add108.3, %xor.i.3
+  %add112.3 = add i32 %add110.3, %xor.i193.3
+  %add113.3 = add i32 %add112.3, %xor.i198.3
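+  ; Fold the two 16-bit halves of the accumulated sum and halve the result.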
+  %conv118 = and i32 %add113.3, 65535
+  %shr = lshr i32 %add113.3, 16
+  %add119 = add nuw nsw i32 %conv118, %shr
+  %shr120 = lshr i32 %add119, 1
+  ret i32 %shr120
+}
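
For reference, the scalar pattern this unrolled IR follows is the classic
4x4 SATD (sum of absolute transformed differences) kernel found in video
codecs such as x264: byte differences from two strided buffers are packed
two-per-i32, put through horizontal and vertical Hadamard butterflies, and
reduced with a packed-absolute-value idiom. A minimal C sketch of that
pattern (function and variable names here are illustrative, not taken from
the test):

#include <stddef.h>
#include <stdint.h>

/* Packed absolute value of two signed 16-bit halves of 'a': s holds
   0xffff in each half whose lane is negative, so (a + s) ^ s negates
   exactly the negative lanes. */
static uint32_t abs2(uint32_t a) {
  uint32_t s = ((a >> 15) & 0x10001u) * 0xffffu;
  return (a + s) ^ s;
}

/* SATD of an 8x4 block: pack two byte differences per 32-bit word, run
   a horizontal then a vertical Hadamard butterfly, accumulate the packed
   absolute values, and fold the two 16-bit halves at the end. */
uint32_t satd_8x4(const uint8_t *p1, ptrdiff_t s1,
                  const uint8_t *p2, ptrdiff_t s2) {
  uint32_t tmp[4][4], sum = 0;
  for (int i = 0; i < 4; i++, p1 += s1, p2 += s2) {
    uint32_t a0 = (p1[0] - p2[0]) + ((uint32_t)(p1[4] - p2[4]) << 16);
    uint32_t a1 = (p1[1] - p2[1]) + ((uint32_t)(p1[5] - p2[5]) << 16);
    uint32_t a2 = (p1[2] - p2[2]) + ((uint32_t)(p1[6] - p2[6]) << 16);
    uint32_t a3 = (p1[3] - p2[3]) + ((uint32_t)(p1[7] - p2[7]) << 16);
    uint32_t t0 = a0 + a1, t1 = a0 - a1, t2 = a2 + a3, t3 = a2 - a3;
    tmp[i][0] = t0 + t2; tmp[i][2] = t0 - t2;
    tmp[i][1] = t1 + t3; tmp[i][3] = t1 - t3;
  }
  for (int i = 0; i < 4; i++) {
    uint32_t t0 = tmp[0][i] + tmp[1][i], t1 = tmp[0][i] - tmp[1][i];
    uint32_t t2 = tmp[2][i] + tmp[3][i], t3 = tmp[2][i] - tmp[3][i];
    sum += abs2(t0 + t2) + abs2(t1 + t3) + abs2(t0 - t2) + abs2(t1 - t3);
  }
  return ((sum & 0xffffu) + (sum >> 16)) >> 1;
}

Note how the byte loads at offsets 0..7 of each row end up consumed in
the order 0, 4, 1, 5, 2, 6, 3, 7 once packed; that interleaving is the
kind of awkward load order this test exercises in the SLP vectorizer.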

More information about the llvm-commits mailing list