[llvm] r283046 - [SLPVectorizer][X86] Added fcopysign tests
Simon Pilgrim via llvm-commits
llvm-commits@lists.llvm.org
Sat Oct 1 10:00:27 PDT 2016
Author: rksimon
Date: Sat Oct 1 12:00:26 2016
New Revision: 283046
URL: http://llvm.org/viewvc/llvm-project?rev=283046&view=rev
Log:
[SLPVectorizer][X86] Added fcopysign tests
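
These cover copysign at the vector widths SLP would be expected to target
(2/4/8 x double and 4/8/16 x float). As the autogenerated checks below show,
every RUN configuration currently emits scalar @llvm.copysign.* calls, so
only the shared CHECK prefix is used and the tests record the unvectorized
baseline.

For reference, a vectorized @fcopysign_2f64 would presumably use the vector
form of the intrinsic, along the lines of this hypothetical sketch (not what
the checks currently expect; the function name is illustrative):

  declare <2 x double> @llvm.copysign.v2f64(<2 x double>, <2 x double>)

  define void @fcopysign_2f64_vectorized() {
    ; Reinterpret the first two elements of each array as a <2 x double>.
    %pa = bitcast [8 x double]* @srcA64 to <2 x double>*
    %pb = bitcast [8 x double]* @srcB64 to <2 x double>*
    %pd = bitcast [8 x double]* @dst64 to <2 x double>*
    %a = load <2 x double>, <2 x double>* %pa, align 8
    %b = load <2 x double>, <2 x double>* %pb, align 8
    ; One vector call replaces the two scalar copysign calls.
    %r = call <2 x double> @llvm.copysign.v2f64(<2 x double> %a, <2 x double> %b)
    store <2 x double> %r, <2 x double>* %pd, align 8
    ret void
  }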
Added:
llvm/trunk/test/Transforms/SLPVectorizer/X86/fcopysign.ll
Added: llvm/trunk/test/Transforms/SLPVectorizer/X86/fcopysign.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/fcopysign.ll?rev=283046&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/fcopysign.ll (added)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/fcopysign.ll Sat Oct 1 12:00:26 2016
@@ -0,0 +1,404 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -mtriple=x86_64-unknown -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=corei7-avx -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX256
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=bdver1 -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX256
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=core-avx2 -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX256
+; RUN: opt < %s -mtriple=x86_64-unknown -mcpu=skylake-avx512 -basicaa -slp-vectorizer -S | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX512
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+@srcA64 = common global [8 x double] zeroinitializer, align 64
+@srcB64 = common global [8 x double] zeroinitializer, align 64
+@srcC64 = common global [8 x double] zeroinitializer, align 64
+@srcA32 = common global [16 x float] zeroinitializer, align 64
+@srcB32 = common global [16 x float] zeroinitializer, align 64
+@srcC32 = common global [16 x float] zeroinitializer, align 64
+@dst64 = common global [8 x double] zeroinitializer, align 64
+@dst32 = common global [16 x float] zeroinitializer, align 64
+
+declare float @llvm.copysign.f32(float, float)
+declare double @llvm.copysign.f64(double, double)
+
+;
+; CHECK
+;
+
+define void @fcopysign_2f64() #0 {
+; CHECK-LABEL: @fcopysign_2f64(
+; CHECK-NEXT: [[A0:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 0), align 8
+; CHECK-NEXT: [[A1:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 1), align 8
+; CHECK-NEXT: [[B0:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 0), align 8
+; CHECK-NEXT: [[B1:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 1), align 8
+; CHECK-NEXT: [[FCOPYSIGN0:%.*]] = call double @llvm.copysign.f64(double [[A0]], double [[B0]])
+; CHECK-NEXT: [[FCOPYSIGN1:%.*]] = call double @llvm.copysign.f64(double [[A1]], double [[B1]])
+; CHECK-NEXT: store double [[FCOPYSIGN0]], double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 0), align 8
+; CHECK-NEXT: store double [[FCOPYSIGN1]], double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 1), align 8
+; CHECK-NEXT: ret void
+;
+ %a0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 0), align 8
+ %a1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 1), align 8
+ %b0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 0), align 8
+ %b1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 1), align 8
+ %fcopysign0 = call double @llvm.copysign.f64(double %a0, double %b0)
+ %fcopysign1 = call double @llvm.copysign.f64(double %a1, double %b1)
+ store double %fcopysign0, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 0), align 8
+ store double %fcopysign1, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 1), align 8
+ ret void
+}
+
+define void @fcopysign_4f64() #0 {
+; CHECK-LABEL: @fcopysign_4f64(
+; CHECK-NEXT: [[A0:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 0), align 8
+; CHECK-NEXT: [[A1:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 1), align 8
+; CHECK-NEXT: [[A2:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 2), align 8
+; CHECK-NEXT: [[A3:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 3), align 8
+; CHECK-NEXT: [[B0:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 0), align 8
+; CHECK-NEXT: [[B1:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 1), align 8
+; CHECK-NEXT: [[B2:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 2), align 8
+; CHECK-NEXT: [[B3:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 3), align 8
+; CHECK-NEXT: [[FCOPYSIGN0:%.*]] = call double @llvm.copysign.f64(double [[A0]], double [[B0]])
+; CHECK-NEXT: [[FCOPYSIGN1:%.*]] = call double @llvm.copysign.f64(double [[A1]], double [[B1]])
+; CHECK-NEXT: [[FCOPYSIGN2:%.*]] = call double @llvm.copysign.f64(double [[A2]], double [[B2]])
+; CHECK-NEXT: [[FCOPYSIGN3:%.*]] = call double @llvm.copysign.f64(double [[A3]], double [[B3]])
+; CHECK-NEXT: store double [[FCOPYSIGN0]], double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 0), align 8
+; CHECK-NEXT: store double [[FCOPYSIGN1]], double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 1), align 8
+; CHECK-NEXT: store double [[FCOPYSIGN2]], double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 2), align 8
+; CHECK-NEXT: store double [[FCOPYSIGN3]], double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 3), align 8
+; CHECK-NEXT: ret void
+;
+ %a0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 0), align 8
+ %a1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 1), align 8
+ %a2 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 2), align 8
+ %a3 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 3), align 8
+ %b0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 0), align 8
+ %b1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 1), align 8
+ %b2 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 2), align 8
+ %b3 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 3), align 8
+ %fcopysign0 = call double @llvm.copysign.f64(double %a0, double %b0)
+ %fcopysign1 = call double @llvm.copysign.f64(double %a1, double %b1)
+ %fcopysign2 = call double @llvm.copysign.f64(double %a2, double %b2)
+ %fcopysign3 = call double @llvm.copysign.f64(double %a3, double %b3)
+ store double %fcopysign0, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 0), align 8
+ store double %fcopysign1, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 1), align 8
+ store double %fcopysign2, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 2), align 8
+ store double %fcopysign3, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 3), align 8
+ ret void
+}
+
+define void @fcopysign_8f64() #0 {
+; CHECK-LABEL: @fcopysign_8f64(
+; CHECK-NEXT: [[A0:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 0), align 4
+; CHECK-NEXT: [[A1:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 1), align 4
+; CHECK-NEXT: [[A2:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 2), align 4
+; CHECK-NEXT: [[A3:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 3), align 4
+; CHECK-NEXT: [[A4:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 4), align 4
+; CHECK-NEXT: [[A5:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 5), align 4
+; CHECK-NEXT: [[A6:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 6), align 4
+; CHECK-NEXT: [[A7:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 7), align 4
+; CHECK-NEXT: [[B0:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 0), align 4
+; CHECK-NEXT: [[B1:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 1), align 4
+; CHECK-NEXT: [[B2:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 2), align 4
+; CHECK-NEXT: [[B3:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 3), align 4
+; CHECK-NEXT: [[B4:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 4), align 4
+; CHECK-NEXT: [[B5:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 5), align 4
+; CHECK-NEXT: [[B6:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 6), align 4
+; CHECK-NEXT: [[B7:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 7), align 4
+; CHECK-NEXT: [[FCOPYSIGN0:%.*]] = call double @llvm.copysign.f64(double [[A0]], double [[B0]])
+; CHECK-NEXT: [[FCOPYSIGN1:%.*]] = call double @llvm.copysign.f64(double [[A1]], double [[B1]])
+; CHECK-NEXT: [[FCOPYSIGN2:%.*]] = call double @llvm.copysign.f64(double [[A2]], double [[B2]])
+; CHECK-NEXT: [[FCOPYSIGN3:%.*]] = call double @llvm.copysign.f64(double [[A3]], double [[B3]])
+; CHECK-NEXT: [[FCOPYSIGN4:%.*]] = call double @llvm.copysign.f64(double [[A4]], double [[B4]])
+; CHECK-NEXT: [[FCOPYSIGN5:%.*]] = call double @llvm.copysign.f64(double [[A5]], double [[B5]])
+; CHECK-NEXT: [[FCOPYSIGN6:%.*]] = call double @llvm.copysign.f64(double [[A6]], double [[B6]])
+; CHECK-NEXT: [[FCOPYSIGN7:%.*]] = call double @llvm.copysign.f64(double [[A7]], double [[B7]])
+; CHECK-NEXT: store double [[FCOPYSIGN0]], double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 0), align 4
+; CHECK-NEXT: store double [[FCOPYSIGN1]], double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 1), align 4
+; CHECK-NEXT: store double [[FCOPYSIGN2]], double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 2), align 4
+; CHECK-NEXT: store double [[FCOPYSIGN3]], double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 3), align 4
+; CHECK-NEXT: store double [[FCOPYSIGN4]], double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 4), align 4
+; CHECK-NEXT: store double [[FCOPYSIGN5]], double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 5), align 4
+; CHECK-NEXT: store double [[FCOPYSIGN6]], double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 6), align 4
+; CHECK-NEXT: store double [[FCOPYSIGN7]], double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 7), align 4
+; CHECK-NEXT: ret void
+;
+ %a0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 0), align 4
+ %a1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 1), align 4
+ %a2 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 2), align 4
+ %a3 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 3), align 4
+ %a4 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 4), align 4
+ %a5 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 5), align 4
+ %a6 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 6), align 4
+ %a7 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcA64, i32 0, i64 7), align 4
+ %b0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 0), align 4
+ %b1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 1), align 4
+ %b2 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 2), align 4
+ %b3 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 3), align 4
+ %b4 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 4), align 4
+ %b5 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 5), align 4
+ %b6 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 6), align 4
+ %b7 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @srcB64, i32 0, i64 7), align 4
+ %fcopysign0 = call double @llvm.copysign.f64(double %a0, double %b0)
+ %fcopysign1 = call double @llvm.copysign.f64(double %a1, double %b1)
+ %fcopysign2 = call double @llvm.copysign.f64(double %a2, double %b2)
+ %fcopysign3 = call double @llvm.copysign.f64(double %a3, double %b3)
+ %fcopysign4 = call double @llvm.copysign.f64(double %a4, double %b4)
+ %fcopysign5 = call double @llvm.copysign.f64(double %a5, double %b5)
+ %fcopysign6 = call double @llvm.copysign.f64(double %a6, double %b6)
+ %fcopysign7 = call double @llvm.copysign.f64(double %a7, double %b7)
+ store double %fcopysign0, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 0), align 4
+ store double %fcopysign1, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 1), align 4
+ store double %fcopysign2, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 2), align 4
+ store double %fcopysign3, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 3), align 4
+ store double %fcopysign4, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 4), align 4
+ store double %fcopysign5, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 5), align 4
+ store double %fcopysign6, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 6), align 4
+ store double %fcopysign7, double* getelementptr inbounds ([8 x double], [8 x double]* @dst64, i32 0, i64 7), align 4
+ ret void
+}
+
+define void @fcopysign_4f32() #0 {
+; CHECK-LABEL: @fcopysign_4f32(
+; CHECK-NEXT: [[A0:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 0), align 4
+; CHECK-NEXT: [[A1:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 1), align 4
+; CHECK-NEXT: [[A2:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 2), align 4
+; CHECK-NEXT: [[A3:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 3), align 4
+; CHECK-NEXT: [[B0:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 0), align 4
+; CHECK-NEXT: [[B1:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 1), align 4
+; CHECK-NEXT: [[B2:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 2), align 4
+; CHECK-NEXT: [[B3:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 3), align 4
+; CHECK-NEXT: [[FCOPYSIGN0:%.*]] = call float @llvm.copysign.f32(float [[A0]], float [[B0]])
+; CHECK-NEXT: [[FCOPYSIGN1:%.*]] = call float @llvm.copysign.f32(float [[A1]], float [[B1]])
+; CHECK-NEXT: [[FCOPYSIGN2:%.*]] = call float @llvm.copysign.f32(float [[A2]], float [[B2]])
+; CHECK-NEXT: [[FCOPYSIGN3:%.*]] = call float @llvm.copysign.f32(float [[A3]], float [[B3]])
+; CHECK-NEXT: store float [[FCOPYSIGN0]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 0), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN1]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 1), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN2]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 2), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN3]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 3), align 4
+; CHECK-NEXT: ret void
+;
+ %a0 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 0), align 4
+ %a1 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 1), align 4
+ %a2 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 2), align 4
+ %a3 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 3), align 4
+ %b0 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 0), align 4
+ %b1 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 1), align 4
+ %b2 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 2), align 4
+ %b3 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 3), align 4
+ %fcopysign0 = call float @llvm.copysign.f32(float %a0, float %b0)
+ %fcopysign1 = call float @llvm.copysign.f32(float %a1, float %b1)
+ %fcopysign2 = call float @llvm.copysign.f32(float %a2, float %b2)
+ %fcopysign3 = call float @llvm.copysign.f32(float %a3, float %b3)
+ store float %fcopysign0, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 0), align 4
+ store float %fcopysign1, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 1), align 4
+ store float %fcopysign2, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 2), align 4
+ store float %fcopysign3, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 3), align 4
+ ret void
+}
+
+define void @fcopysign_8f32() #0 {
+; CHECK-LABEL: @fcopysign_8f32(
+; CHECK-NEXT: [[A0:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 0), align 4
+; CHECK-NEXT: [[A1:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 1), align 4
+; CHECK-NEXT: [[A2:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 2), align 4
+; CHECK-NEXT: [[A3:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 3), align 4
+; CHECK-NEXT: [[A4:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 4), align 4
+; CHECK-NEXT: [[A5:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 5), align 4
+; CHECK-NEXT: [[A6:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 6), align 4
+; CHECK-NEXT: [[A7:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 7), align 4
+; CHECK-NEXT: [[B0:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 0), align 4
+; CHECK-NEXT: [[B1:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 1), align 4
+; CHECK-NEXT: [[B2:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 2), align 4
+; CHECK-NEXT: [[B3:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 3), align 4
+; CHECK-NEXT: [[B4:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 4), align 4
+; CHECK-NEXT: [[B5:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 5), align 4
+; CHECK-NEXT: [[B6:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 6), align 4
+; CHECK-NEXT: [[B7:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 7), align 4
+; CHECK-NEXT: [[FCOPYSIGN0:%.*]] = call float @llvm.copysign.f32(float [[A0]], float [[B0]])
+; CHECK-NEXT: [[FCOPYSIGN1:%.*]] = call float @llvm.copysign.f32(float [[A1]], float [[B1]])
+; CHECK-NEXT: [[FCOPYSIGN2:%.*]] = call float @llvm.copysign.f32(float [[A2]], float [[B2]])
+; CHECK-NEXT: [[FCOPYSIGN3:%.*]] = call float @llvm.copysign.f32(float [[A3]], float [[B3]])
+; CHECK-NEXT: [[FCOPYSIGN4:%.*]] = call float @llvm.copysign.f32(float [[A4]], float [[B4]])
+; CHECK-NEXT: [[FCOPYSIGN5:%.*]] = call float @llvm.copysign.f32(float [[A5]], float [[B5]])
+; CHECK-NEXT: [[FCOPYSIGN6:%.*]] = call float @llvm.copysign.f32(float [[A6]], float [[B6]])
+; CHECK-NEXT: [[FCOPYSIGN7:%.*]] = call float @llvm.copysign.f32(float [[A7]], float [[B7]])
+; CHECK-NEXT: store float [[FCOPYSIGN0]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 0), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN1]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 1), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN2]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 2), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN3]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 3), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN4]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 4), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN5]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 5), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN6]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 6), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN7]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 7), align 4
+; CHECK-NEXT: ret void
+;
+ %a0 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 0), align 4
+ %a1 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 1), align 4
+ %a2 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 2), align 4
+ %a3 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 3), align 4
+ %a4 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 4), align 4
+ %a5 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 5), align 4
+ %a6 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 6), align 4
+ %a7 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 7), align 4
+ %b0 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 0), align 4
+ %b1 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 1), align 4
+ %b2 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 2), align 4
+ %b3 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 3), align 4
+ %b4 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 4), align 4
+ %b5 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 5), align 4
+ %b6 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 6), align 4
+ %b7 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 7), align 4
+ %fcopysign0 = call float @llvm.copysign.f32(float %a0, float %b0)
+ %fcopysign1 = call float @llvm.copysign.f32(float %a1, float %b1)
+ %fcopysign2 = call float @llvm.copysign.f32(float %a2, float %b2)
+ %fcopysign3 = call float @llvm.copysign.f32(float %a3, float %b3)
+ %fcopysign4 = call float @llvm.copysign.f32(float %a4, float %b4)
+ %fcopysign5 = call float @llvm.copysign.f32(float %a5, float %b5)
+ %fcopysign6 = call float @llvm.copysign.f32(float %a6, float %b6)
+ %fcopysign7 = call float @llvm.copysign.f32(float %a7, float %b7)
+ store float %fcopysign0, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 0), align 4
+ store float %fcopysign1, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 1), align 4
+ store float %fcopysign2, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 2), align 4
+ store float %fcopysign3, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 3), align 4
+ store float %fcopysign4, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 4), align 4
+ store float %fcopysign5, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 5), align 4
+ store float %fcopysign6, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 6), align 4
+ store float %fcopysign7, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 7), align 4
+ ret void
+}
+
+define void @fcopysign_16f32() #0 {
+; CHECK-LABEL: @fcopysign_16f32(
+; CHECK-NEXT: [[A0:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 0), align 4
+; CHECK-NEXT: [[A1:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 1), align 4
+; CHECK-NEXT: [[A2:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 2), align 4
+; CHECK-NEXT: [[A3:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 3), align 4
+; CHECK-NEXT: [[A4:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 4), align 4
+; CHECK-NEXT: [[A5:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 5), align 4
+; CHECK-NEXT: [[A6:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 6), align 4
+; CHECK-NEXT: [[A7:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 7), align 4
+; CHECK-NEXT: [[A8:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 8), align 4
+; CHECK-NEXT: [[A9:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 9), align 4
+; CHECK-NEXT: [[A10:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 10), align 4
+; CHECK-NEXT: [[A11:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 11), align 4
+; CHECK-NEXT: [[A12:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 12), align 4
+; CHECK-NEXT: [[A13:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 13), align 4
+; CHECK-NEXT: [[A14:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 14), align 4
+; CHECK-NEXT: [[A15:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 15), align 4
+; CHECK-NEXT: [[B0:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 0), align 4
+; CHECK-NEXT: [[B1:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 1), align 4
+; CHECK-NEXT: [[B2:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 2), align 4
+; CHECK-NEXT: [[B3:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 3), align 4
+; CHECK-NEXT: [[B4:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 4), align 4
+; CHECK-NEXT: [[B5:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 5), align 4
+; CHECK-NEXT: [[B6:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 6), align 4
+; CHECK-NEXT: [[B7:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 7), align 4
+; CHECK-NEXT: [[B8:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 8), align 4
+; CHECK-NEXT: [[B9:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 9), align 4
+; CHECK-NEXT: [[B10:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 10), align 4
+; CHECK-NEXT: [[B11:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 11), align 4
+; CHECK-NEXT: [[B12:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 12), align 4
+; CHECK-NEXT: [[B13:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 13), align 4
+; CHECK-NEXT: [[B14:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 14), align 4
+; CHECK-NEXT: [[B15:%.*]] = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 15), align 4
+; CHECK-NEXT: [[FCOPYSIGN0:%.*]] = call float @llvm.copysign.f32(float [[A0]], float [[B0]])
+; CHECK-NEXT: [[FCOPYSIGN1:%.*]] = call float @llvm.copysign.f32(float [[A1]], float [[B1]])
+; CHECK-NEXT: [[FCOPYSIGN2:%.*]] = call float @llvm.copysign.f32(float [[A2]], float [[B2]])
+; CHECK-NEXT: [[FCOPYSIGN3:%.*]] = call float @llvm.copysign.f32(float [[A3]], float [[B3]])
+; CHECK-NEXT: [[FCOPYSIGN4:%.*]] = call float @llvm.copysign.f32(float [[A4]], float [[B4]])
+; CHECK-NEXT: [[FCOPYSIGN5:%.*]] = call float @llvm.copysign.f32(float [[A5]], float [[B5]])
+; CHECK-NEXT: [[FCOPYSIGN6:%.*]] = call float @llvm.copysign.f32(float [[A6]], float [[B6]])
+; CHECK-NEXT: [[FCOPYSIGN7:%.*]] = call float @llvm.copysign.f32(float [[A7]], float [[B7]])
+; CHECK-NEXT: [[FCOPYSIGN8:%.*]] = call float @llvm.copysign.f32(float [[A8]], float [[B8]])
+; CHECK-NEXT: [[FCOPYSIGN9:%.*]] = call float @llvm.copysign.f32(float [[A9]], float [[B9]])
+; CHECK-NEXT: [[FCOPYSIGN10:%.*]] = call float @llvm.copysign.f32(float [[A10]], float [[B10]])
+; CHECK-NEXT: [[FCOPYSIGN11:%.*]] = call float @llvm.copysign.f32(float [[A11]], float [[B11]])
+; CHECK-NEXT: [[FCOPYSIGN12:%.*]] = call float @llvm.copysign.f32(float [[A12]], float [[B12]])
+; CHECK-NEXT: [[FCOPYSIGN13:%.*]] = call float @llvm.copysign.f32(float [[A13]], float [[B13]])
+; CHECK-NEXT: [[FCOPYSIGN14:%.*]] = call float @llvm.copysign.f32(float [[A14]], float [[B14]])
+; CHECK-NEXT: [[FCOPYSIGN15:%.*]] = call float @llvm.copysign.f32(float [[A15]], float [[B15]])
+; CHECK-NEXT: store float [[FCOPYSIGN0]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 0), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN1]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 1), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN2]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 2), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN3]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 3), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN4]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 4), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN5]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 5), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN6]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 6), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN7]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 7), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN8]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 8), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN9]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 9), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN10]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 10), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN11]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 11), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN12]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 12), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN13]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 13), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN14]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 14), align 4
+; CHECK-NEXT: store float [[FCOPYSIGN15]], float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 15), align 4
+; CHECK-NEXT: ret void
+;
+ %a0 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 0), align 4
+ %a1 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 1), align 4
+ %a2 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 2), align 4
+ %a3 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 3), align 4
+ %a4 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 4), align 4
+ %a5 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 5), align 4
+ %a6 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 6), align 4
+ %a7 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 7), align 4
+ %a8 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 8), align 4
+ %a9 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 9), align 4
+ %a10 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 10), align 4
+ %a11 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 11), align 4
+ %a12 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 12), align 4
+ %a13 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 13), align 4
+ %a14 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 14), align 4
+ %a15 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcA32, i32 0, i64 15), align 4
+ %b0 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 0), align 4
+ %b1 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 1), align 4
+ %b2 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 2), align 4
+ %b3 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 3), align 4
+ %b4 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 4), align 4
+ %b5 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 5), align 4
+ %b6 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 6), align 4
+ %b7 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 7), align 4
+ %b8 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 8), align 4
+ %b9 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 9), align 4
+ %b10 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 10), align 4
+ %b11 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 11), align 4
+ %b12 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 12), align 4
+ %b13 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 13), align 4
+ %b14 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 14), align 4
+ %b15 = load float, float* getelementptr inbounds ([16 x float], [16 x float]* @srcB32, i32 0, i64 15), align 4
+ %fcopysign0 = call float @llvm.copysign.f32(float %a0 , float %b0 )
+ %fcopysign1 = call float @llvm.copysign.f32(float %a1 , float %b1 )
+ %fcopysign2 = call float @llvm.copysign.f32(float %a2 , float %b2 )
+ %fcopysign3 = call float @llvm.copysign.f32(float %a3 , float %b3 )
+ %fcopysign4 = call float @llvm.copysign.f32(float %a4 , float %b4 )
+ %fcopysign5 = call float @llvm.copysign.f32(float %a5 , float %b5 )
+ %fcopysign6 = call float @llvm.copysign.f32(float %a6 , float %b6 )
+ %fcopysign7 = call float @llvm.copysign.f32(float %a7 , float %b7 )
+ %fcopysign8 = call float @llvm.copysign.f32(float %a8 , float %b8 )
+ %fcopysign9 = call float @llvm.copysign.f32(float %a9 , float %b9 )
+ %fcopysign10 = call float @llvm.copysign.f32(float %a10, float %b10)
+ %fcopysign11 = call float @llvm.copysign.f32(float %a11, float %b11)
+ %fcopysign12 = call float @llvm.copysign.f32(float %a12, float %b12)
+ %fcopysign13 = call float @llvm.copysign.f32(float %a13, float %b13)
+ %fcopysign14 = call float @llvm.copysign.f32(float %a14, float %b14)
+ %fcopysign15 = call float @llvm.copysign.f32(float %a15, float %b15)
+ store float %fcopysign0 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 0), align 4
+ store float %fcopysign1 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 1), align 4
+ store float %fcopysign2 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 2), align 4
+ store float %fcopysign3 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 3), align 4
+ store float %fcopysign4 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 4), align 4
+ store float %fcopysign5 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 5), align 4
+ store float %fcopysign6 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 6), align 4
+ store float %fcopysign7 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 7), align 4
+ store float %fcopysign8 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 8), align 4
+ store float %fcopysign9 , float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 9), align 4
+ store float %fcopysign10, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 10), align 4
+ store float %fcopysign11, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 11), align 4
+ store float %fcopysign12, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 12), align 4
+ store float %fcopysign13, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 13), align 4
+ store float %fcopysign14, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 14), align 4
+ store float %fcopysign15, float* getelementptr inbounds ([16 x float], [16 x float]* @dst32, i32 0, i64 15), align 4
+ ret void
+}
+
+attributes #0 = { nounwind }
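
The CHECK lines above were produced by utils/update_test_checks.py (see the
NOTE at the top of the test). If the SLP vectorizer later learns to widen
these calls, the assertions can be refreshed with an invocation along these
lines (the opt binary path is an assumption; adjust it for your build tree):

  utils/update_test_checks.py --opt-binary=build/bin/opt \
      test/Transforms/SLPVectorizer/X86/fcopysign.ll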