[llvm] dee4d69 - [NFC][X86][LV] Add costmodel test coverage for interleaved i64/f64 load/store stride=6
Roman Lebedev via llvm-commits
llvm-commits at lists.llvm.org
Mon Oct 4 10:58:14 PDT 2021
Author: Roman Lebedev
Date: 2021-10-04T20:57:35+03:00
New Revision: dee4d699b27de6d828c1e1fa3e6f5cb655f89e99
URL: https://github.com/llvm/llvm-project/commit/dee4d699b27de6d828c1e1fa3e6f5cb655f89e99
DIFF: https://github.com/llvm/llvm-project/commit/dee4d699b27de6d828c1e1fa3e6f5cb655f89e99.diff
LOG: [NFC][X86][LV] Add costmodel test coverage for interleaved i64/f64 load/store stride=6
Added:
llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-6.ll
llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-6.ll
llvm/test/Analysis/CostModel/X86/interleaved-store-f64-stride-6.ll
llvm/test/Analysis/CostModel/X86/interleaved-store-i64-stride-6.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-6.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-6.ll
new file mode 100644
index 000000000000..9ff11c8427f6
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-f64-stride-6.ll
@@ -0,0 +1,83 @@
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+sse2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX1
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX2
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx512bw,+avx512vl --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX512
+; REQUIRES: asserts
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = global [1024 x double] zeroinitializer, align 128
+@B = global [1024 x i8] zeroinitializer, align 128
+
+; CHECK: LV: Checking a loop in "test"
+;
+; SSE2: LV: Found an estimated cost of 1 for VF 1 For instruction: %v0 = load double, double* %in0, align 8
+; SSE2: LV: Found an estimated cost of 18 for VF 2 For instruction: %v0 = load double, double* %in0, align 8
+; SSE2: LV: Found an estimated cost of 36 for VF 4 For instruction: %v0 = load double, double* %in0, align 8
+;
+; AVX1: LV: Found an estimated cost of 1 for VF 1 For instruction: %v0 = load double, double* %in0, align 8
+; AVX1: LV: Found an estimated cost of 21 for VF 2 For instruction: %v0 = load double, double* %in0, align 8
+; AVX1: LV: Found an estimated cost of 48 for VF 4 For instruction: %v0 = load double, double* %in0, align 8
+; AVX1: LV: Found an estimated cost of 96 for VF 8 For instruction: %v0 = load double, double* %in0, align 8
+;
+; AVX2: LV: Found an estimated cost of 1 for VF 1 For instruction: %v0 = load double, double* %in0, align 8
+; AVX2: LV: Found an estimated cost of 21 for VF 2 For instruction: %v0 = load double, double* %in0, align 8
+; AVX2: LV: Found an estimated cost of 48 for VF 4 For instruction: %v0 = load double, double* %in0, align 8
+; AVX2: LV: Found an estimated cost of 96 for VF 8 For instruction: %v0 = load double, double* %in0, align 8
+;
+; AVX512: LV: Found an estimated cost of 1 for VF 1 For instruction: %v0 = load double, double* %in0, align 8
+; AVX512: LV: Found an estimated cost of 11 for VF 2 For instruction: %v0 = load double, double* %in0, align 8
+; AVX512: LV: Found an estimated cost of 21 for VF 4 For instruction: %v0 = load double, double* %in0, align 8
+; AVX512: LV: Found an estimated cost of 51 for VF 8 For instruction: %v0 = load double, double* %in0, align 8
+; AVX512: LV: Found an estimated cost of 120 for VF 16 For instruction: %v0 = load double, double* %in0, align 8
+; AVX512: LV: Found an estimated cost of 240 for VF 32 For instruction: %v0 = load double, double* %in0, align 8
+;
+; CHECK-NOT: LV: Found an estimated cost of {{[0-9]+}} for VF {{[0-9]+}} For instruction: %v0 = load double, double* %in0, align 8
+
+define void @test() {
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+
+ %iv.0 = add nuw nsw i64 %iv, 0
+ %iv.1 = add nuw nsw i64 %iv, 1
+ %iv.2 = add nuw nsw i64 %iv, 2
+ %iv.3 = add nuw nsw i64 %iv, 3
+ %iv.4 = add nuw nsw i64 %iv, 4
+ %iv.5 = add nuw nsw i64 %iv, 5
+
+ %in0 = getelementptr inbounds [1024 x double], [1024 x double]* @A, i64 0, i64 %iv.0
+ %in1 = getelementptr inbounds [1024 x double], [1024 x double]* @A, i64 0, i64 %iv.1
+ %in2 = getelementptr inbounds [1024 x double], [1024 x double]* @A, i64 0, i64 %iv.2
+ %in3 = getelementptr inbounds [1024 x double], [1024 x double]* @A, i64 0, i64 %iv.3
+ %in4 = getelementptr inbounds [1024 x double], [1024 x double]* @A, i64 0, i64 %iv.4
+ %in5 = getelementptr inbounds [1024 x double], [1024 x double]* @A, i64 0, i64 %iv.5
+
+ %v0 = load double, double* %in0
+ %v1 = load double, double* %in1
+ %v2 = load double, double* %in2
+ %v3 = load double, double* %in3
+ %v4 = load double, double* %in4
+ %v5 = load double, double* %in5
+
+ %reduce.add.0 = fadd double %v0, %v1
+ %reduce.add.1 = fadd double %reduce.add.0, %v2
+ %reduce.add.2 = fadd double %reduce.add.1, %v3
+ %reduce.add.3 = fadd double %reduce.add.2, %v4
+ %reduce.add.4 = fadd double %reduce.add.3, %v5
+
+ %reduce.add.4.narrow = fptoui double %reduce.add.4 to i8
+
+ %out = getelementptr inbounds [1024 x i8], [1024 x i8]* @B, i64 0, i64 %iv.0
+ store i8 %reduce.add.4.narrow, i8* %out
+
+ %iv.next = add nuw nsw i64 %iv.0, 6
+ %cmp = icmp ult i64 %iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+ ret void
+}
diff --git a/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-6.ll b/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-6.ll
new file mode 100644
index 000000000000..2e586d7d11f4
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-load-i64-stride-6.ll
@@ -0,0 +1,83 @@
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+sse2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX1
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX2
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx512bw,+avx512vl --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX512
+; REQUIRES: asserts
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = global [1024 x i64] zeroinitializer, align 128
+@B = global [1024 x i8] zeroinitializer, align 128
+
+; CHECK: LV: Checking a loop in "test"
+;
+; SSE2: LV: Found an estimated cost of 1 for VF 1 For instruction: %v0 = load i64, i64* %in0, align 8
+; SSE2: LV: Found an estimated cost of 42 for VF 2 For instruction: %v0 = load i64, i64* %in0, align 8
+; SSE2: LV: Found an estimated cost of 84 for VF 4 For instruction: %v0 = load i64, i64* %in0, align 8
+;
+; AVX1: LV: Found an estimated cost of 1 for VF 1 For instruction: %v0 = load i64, i64* %in0, align 8
+; AVX1: LV: Found an estimated cost of 33 for VF 2 For instruction: %v0 = load i64, i64* %in0, align 8
+; AVX1: LV: Found an estimated cost of 78 for VF 4 For instruction: %v0 = load i64, i64* %in0, align 8
+; AVX1: LV: Found an estimated cost of 156 for VF 8 For instruction: %v0 = load i64, i64* %in0, align 8
+;
+; AVX2: LV: Found an estimated cost of 1 for VF 1 For instruction: %v0 = load i64, i64* %in0, align 8
+; AVX2: LV: Found an estimated cost of 33 for VF 2 For instruction: %v0 = load i64, i64* %in0, align 8
+; AVX2: LV: Found an estimated cost of 78 for VF 4 For instruction: %v0 = load i64, i64* %in0, align 8
+; AVX2: LV: Found an estimated cost of 156 for VF 8 For instruction: %v0 = load i64, i64* %in0, align 8
+;
+; AVX512: LV: Found an estimated cost of 1 for VF 1 For instruction: %v0 = load i64, i64* %in0, align 8
+; AVX512: LV: Found an estimated cost of 11 for VF 2 For instruction: %v0 = load i64, i64* %in0, align 8
+; AVX512: LV: Found an estimated cost of 21 for VF 4 For instruction: %v0 = load i64, i64* %in0, align 8
+; AVX512: LV: Found an estimated cost of 51 for VF 8 For instruction: %v0 = load i64, i64* %in0, align 8
+; AVX512: LV: Found an estimated cost of 120 for VF 16 For instruction: %v0 = load i64, i64* %in0, align 8
+; AVX512: LV: Found an estimated cost of 240 for VF 32 For instruction: %v0 = load i64, i64* %in0, align 8
+;
+; CHECK-NOT: LV: Found an estimated cost of {{[0-9]+}} for VF {{[0-9]+}} For instruction: %v0 = load i64, i64* %in0, align 8
+
+define void @test() {
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+
+ %iv.0 = add nuw nsw i64 %iv, 0
+ %iv.1 = add nuw nsw i64 %iv, 1
+ %iv.2 = add nuw nsw i64 %iv, 2
+ %iv.3 = add nuw nsw i64 %iv, 3
+ %iv.4 = add nuw nsw i64 %iv, 4
+ %iv.5 = add nuw nsw i64 %iv, 5
+
+ %in0 = getelementptr inbounds [1024 x i64], [1024 x i64]* @A, i64 0, i64 %iv.0
+ %in1 = getelementptr inbounds [1024 x i64], [1024 x i64]* @A, i64 0, i64 %iv.1
+ %in2 = getelementptr inbounds [1024 x i64], [1024 x i64]* @A, i64 0, i64 %iv.2
+ %in3 = getelementptr inbounds [1024 x i64], [1024 x i64]* @A, i64 0, i64 %iv.3
+ %in4 = getelementptr inbounds [1024 x i64], [1024 x i64]* @A, i64 0, i64 %iv.4
+ %in5 = getelementptr inbounds [1024 x i64], [1024 x i64]* @A, i64 0, i64 %iv.5
+
+ %v0 = load i64, i64* %in0
+ %v1 = load i64, i64* %in1
+ %v2 = load i64, i64* %in2
+ %v3 = load i64, i64* %in3
+ %v4 = load i64, i64* %in4
+ %v5 = load i64, i64* %in5
+
+ %reduce.add.0 = add i64 %v0, %v1
+ %reduce.add.1 = add i64 %reduce.add.0, %v2
+ %reduce.add.2 = add i64 %reduce.add.1, %v3
+ %reduce.add.3 = add i64 %reduce.add.2, %v4
+ %reduce.add.4 = add i64 %reduce.add.3, %v5
+
+ %reduce.add.4.narrow = trunc i64 %reduce.add.4 to i8
+
+ %out = getelementptr inbounds [1024 x i8], [1024 x i8]* @B, i64 0, i64 %iv.0
+ store i8 %reduce.add.4.narrow, i8* %out
+
+ %iv.next = add nuw nsw i64 %iv.0, 6
+ %cmp = icmp ult i64 %iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+ ret void
+}
diff --git a/llvm/test/Analysis/CostModel/X86/interleaved-store-f64-stride-6.ll b/llvm/test/Analysis/CostModel/X86/interleaved-store-f64-stride-6.ll
new file mode 100644
index 000000000000..8e1f9a7aa9c3
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-store-f64-stride-6.ll
@@ -0,0 +1,84 @@
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+sse2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX1
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX2
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx512bw,+avx512vl --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX512
+; REQUIRES: asserts
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = global [1024 x i8] zeroinitializer, align 128
+@B = global [1024 x double] zeroinitializer, align 128
+
+; CHECK: LV: Checking a loop in "test"
+;
+; SSE2: LV: Found an estimated cost of 1 for VF 1 For instruction: store double %v5, double* %out5, align 8
+; SSE2: LV: Found an estimated cost of 20 for VF 2 For instruction: store double %v5, double* %out5, align 8
+; SSE2: LV: Found an estimated cost of 40 for VF 4 For instruction: store double %v5, double* %out5, align 8
+;
+; AVX1: LV: Found an estimated cost of 1 for VF 1 For instruction: store double %v5, double* %out5, align 8
+; AVX1: LV: Found an estimated cost of 21 for VF 2 For instruction: store double %v5, double* %out5, align 8
+; AVX1: LV: Found an estimated cost of 54 for VF 4 For instruction: store double %v5, double* %out5, align 8
+; AVX1: LV: Found an estimated cost of 108 for VF 8 For instruction: store double %v5, double* %out5, align 8
+;
+; AVX2: LV: Found an estimated cost of 1 for VF 1 For instruction: store double %v5, double* %out5, align 8
+; AVX2: LV: Found an estimated cost of 21 for VF 2 For instruction: store double %v5, double* %out5, align 8
+; AVX2: LV: Found an estimated cost of 54 for VF 4 For instruction: store double %v5, double* %out5, align 8
+; AVX2: LV: Found an estimated cost of 108 for VF 8 For instruction: store double %v5, double* %out5, align 8
+;
+; AVX512: LV: Found an estimated cost of 1 for VF 1 For instruction: store double %v5, double* %out5, align 8
+; AVX512: LV: Found an estimated cost of 17 for VF 2 For instruction: store double %v5, double* %out5, align 8
+; AVX512: LV: Found an estimated cost of 25 for VF 4 For instruction: store double %v5, double* %out5, align 8
+; AVX512: LV: Found an estimated cost of 51 for VF 8 For instruction: store double %v5, double* %out5, align 8
+; AVX512: LV: Found an estimated cost of 102 for VF 16 For instruction: store double %v5, double* %out5, align 8
+; AVX512: LV: Found an estimated cost of 204 for VF 32 For instruction: store double %v5, double* %out5, align 8
+;
+; CHECK-NOT: LV: Found an estimated cost of {{[0-9]+}} for VF {{[0-9]+}} For instruction: store double %v5, double* %out5, align 8
+
+define void @test() {
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+
+ %iv.0 = add nuw nsw i64 %iv, 0
+ %iv.1 = add nuw nsw i64 %iv, 1
+ %iv.2 = add nuw nsw i64 %iv, 2
+ %iv.3 = add nuw nsw i64 %iv, 3
+ %iv.4 = add nuw nsw i64 %iv, 4
+ %iv.5 = add nuw nsw i64 %iv, 5
+
+ %in = getelementptr inbounds [1024 x i8], [1024 x i8]* @A, i64 0, i64 %iv.0
+ %v.narrow = load i8, i8* %in
+
+ %v = uitofp i8 %v.narrow to double
+
+ %v0 = fadd double %v, 0.0
+ %v1 = fadd double %v, 1.0
+ %v2 = fadd double %v, 2.0
+ %v3 = fadd double %v, 3.0
+ %v4 = fadd double %v, 4.0
+ %v5 = fadd double %v, 5.0
+
+ %out0 = getelementptr inbounds [1024 x double], [1024 x double]* @B, i64 0, i64 %iv.0
+ %out1 = getelementptr inbounds [1024 x double], [1024 x double]* @B, i64 0, i64 %iv.1
+ %out2 = getelementptr inbounds [1024 x double], [1024 x double]* @B, i64 0, i64 %iv.2
+ %out3 = getelementptr inbounds [1024 x double], [1024 x double]* @B, i64 0, i64 %iv.3
+ %out4 = getelementptr inbounds [1024 x double], [1024 x double]* @B, i64 0, i64 %iv.4
+ %out5 = getelementptr inbounds [1024 x double], [1024 x double]* @B, i64 0, i64 %iv.5
+
+ store double %v0, double* %out0
+ store double %v1, double* %out1
+ store double %v2, double* %out2
+ store double %v3, double* %out3
+ store double %v4, double* %out4
+ store double %v5, double* %out5
+
+ %iv.next = add nuw nsw i64 %iv.0, 6
+ %cmp = icmp ult i64 %iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+ ret void
+}
diff --git a/llvm/test/Analysis/CostModel/X86/interleaved-store-i64-stride-6.ll b/llvm/test/Analysis/CostModel/X86/interleaved-store-i64-stride-6.ll
new file mode 100644
index 000000000000..1a1b9ca555fc
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/X86/interleaved-store-i64-stride-6.ll
@@ -0,0 +1,84 @@
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+sse2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,SSE2
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX1
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx2 --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX2
+; RUN: opt -loop-vectorize -vectorizer-maximize-bandwidth -S -mattr=+avx512bw,+avx512vl --debug-only=loop-vectorize < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,AVX512
+; REQUIRES: asserts
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@A = global [1024 x i8] zeroinitializer, align 128
+@B = global [1024 x i64] zeroinitializer, align 128
+
+; CHECK: LV: Checking a loop in "test"
+;
+; SSE2: LV: Found an estimated cost of 1 for VF 1 For instruction: store i64 %v5, i64* %out5, align 8
+; SSE2: LV: Found an estimated cost of 44 for VF 2 For instruction: store i64 %v5, i64* %out5, align 8
+; SSE2: LV: Found an estimated cost of 88 for VF 4 For instruction: store i64 %v5, i64* %out5, align 8
+;
+; AVX1: LV: Found an estimated cost of 1 for VF 1 For instruction: store i64 %v5, i64* %out5, align 8
+; AVX1: LV: Found an estimated cost of 33 for VF 2 For instruction: store i64 %v5, i64* %out5, align 8
+; AVX1: LV: Found an estimated cost of 78 for VF 4 For instruction: store i64 %v5, i64* %out5, align 8
+; AVX1: LV: Found an estimated cost of 156 for VF 8 For instruction: store i64 %v5, i64* %out5, align 8
+;
+; AVX2: LV: Found an estimated cost of 1 for VF 1 For instruction: store i64 %v5, i64* %out5, align 8
+; AVX2: LV: Found an estimated cost of 33 for VF 2 For instruction: store i64 %v5, i64* %out5, align 8
+; AVX2: LV: Found an estimated cost of 78 for VF 4 For instruction: store i64 %v5, i64* %out5, align 8
+; AVX2: LV: Found an estimated cost of 156 for VF 8 For instruction: store i64 %v5, i64* %out5, align 8
+;
+; AVX512: LV: Found an estimated cost of 1 for VF 1 For instruction: store i64 %v5, i64* %out5, align 8
+; AVX512: LV: Found an estimated cost of 17 for VF 2 For instruction: store i64 %v5, i64* %out5, align 8
+; AVX512: LV: Found an estimated cost of 25 for VF 4 For instruction: store i64 %v5, i64* %out5, align 8
+; AVX512: LV: Found an estimated cost of 51 for VF 8 For instruction: store i64 %v5, i64* %out5, align 8
+; AVX512: LV: Found an estimated cost of 102 for VF 16 For instruction: store i64 %v5, i64* %out5, align 8
+; AVX512: LV: Found an estimated cost of 204 for VF 32 For instruction: store i64 %v5, i64* %out5, align 8
+;
+; CHECK-NOT: LV: Found an estimated cost of {{[0-9]+}} for VF {{[0-9]+}} For instruction: store i64 %v5, i64* %out5, align 8
+
+define void @test() {
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+
+ %iv.0 = add nuw nsw i64 %iv, 0
+ %iv.1 = add nuw nsw i64 %iv, 1
+ %iv.2 = add nuw nsw i64 %iv, 2
+ %iv.3 = add nuw nsw i64 %iv, 3
+ %iv.4 = add nuw nsw i64 %iv, 4
+ %iv.5 = add nuw nsw i64 %iv, 5
+
+ %in = getelementptr inbounds [1024 x i8], [1024 x i8]* @A, i64 0, i64 %iv.0
+ %v.narrow = load i8, i8* %in
+
+ %v = zext i8 %v.narrow to i64
+
+ %v0 = add i64 %v, 0
+ %v1 = add i64 %v, 1
+ %v2 = add i64 %v, 2
+ %v3 = add i64 %v, 3
+ %v4 = add i64 %v, 4
+ %v5 = add i64 %v, 5
+
+ %out0 = getelementptr inbounds [1024 x i64], [1024 x i64]* @B, i64 0, i64 %iv.0
+ %out1 = getelementptr inbounds [1024 x i64], [1024 x i64]* @B, i64 0, i64 %iv.1
+ %out2 = getelementptr inbounds [1024 x i64], [1024 x i64]* @B, i64 0, i64 %iv.2
+ %out3 = getelementptr inbounds [1024 x i64], [1024 x i64]* @B, i64 0, i64 %iv.3
+ %out4 = getelementptr inbounds [1024 x i64], [1024 x i64]* @B, i64 0, i64 %iv.4
+ %out5 = getelementptr inbounds [1024 x i64], [1024 x i64]* @B, i64 0, i64 %iv.5
+
+ store i64 %v0, i64* %out0
+ store i64 %v1, i64* %out1
+ store i64 %v2, i64* %out2
+ store i64 %v3, i64* %out3
+ store i64 %v4, i64* %out4
+ store i64 %v5, i64* %out5
+
+ %iv.next = add nuw nsw i64 %iv.0, 6
+ %cmp = icmp ult i64 %iv.next, 1024
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup:
+ ret void
+}
More information about the llvm-commits
mailing list