[llvm] b051c83 - [SLP][NFC] This adds a test for a follow-up patch that fixes a look-ahead operand reordering issue
Vasileios Porpodas via llvm-commits
llvm-commits at lists.llvm.org
Thu Mar 17 18:06:25 PDT 2022
Author: Vasileios Porpodas
Date: 2022-03-17T18:05:53-07:00
New Revision: b051c836c0c8ca722ef5d58cee7fa589da43aa28
URL: https://github.com/llvm/llvm-project/commit/b051c836c0c8ca722ef5d58cee7fa589da43aa28
DIFF: https://github.com/llvm/llvm-project/commit/b051c836c0c8ca722ef5d58cee7fa589da43aa28.diff
LOG: [SLP][NFC] This adds a test for a follow-up patch that fixes a look-ahead operand reordering issue
Differential Revision: https://reviews.llvm.org/D121353
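
For context on the new test's comment: when SLP vectorizes the second operands of the fmul pairs, it can either reuse the <2 x double> load of %array2 through a reverse shuffle, or splat each %array2 element into its own 2-wide vector. A minimal hypothetical IR sketch of the two shapes (%v2 stands for the vector load of %array2; this sketch is not part of the commit):

  ; Reverse-load form, which the CHECK lines in the new test currently expect:
  %rev = shufflevector <2 x double> %v2, <2 x double> poison, <2 x i32> <i32 1, i32 0>

  ; Splat form, which the follow-up patch should prefer; on x86 a 2-wide
  ; splat load is a single instruction, so each splat is quite cheap:
  %splat0 = shufflevector <2 x double> %v2, <2 x double> poison, <2 x i32> <i32 0, i32 0>
  %splat1 = shufflevector <2 x double> %v2, <2 x double> poison, <2 x i32> <i32 1, i32 1>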
Added:
Modified:
llvm/test/Transforms/SLPVectorizer/X86/lookahead.ll
llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll
Removed:
################################################################################
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/lookahead.ll b/llvm/test/Transforms/SLPVectorizer/X86/lookahead.ll
index ba3bd26d38610..64fac71fe4b12 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/lookahead.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/lookahead.ll
@@ -642,3 +642,53 @@ define void @ChecksExtractScores_different_vectors(double* %storeArray, double*
store double %add1, double *%sidx1, align 8
ret void
}
+
+; This checks that we prefer splats rather than reverse load vectors + shuffles.
+; 2-wide splat loads in x86 use a single instruction so they are quite cheap.
+define double @splat_loads(double *%array1, double *%array2, double *%ptrA, double *%ptrB) {
+; CHECK-LABEL: @splat_loads(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[GEP_1_0:%.*]] = getelementptr inbounds double, double* [[ARRAY1:%.*]], i64 0
+; CHECK-NEXT: [[GEP_1_1:%.*]] = getelementptr inbounds double, double* [[ARRAY1]], i64 1
+; CHECK-NEXT: [[TMP0:%.*]] = bitcast double* [[GEP_1_0]] to <2 x double>*
+; CHECK-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 8
+; CHECK-NEXT: [[GEP_2_0:%.*]] = getelementptr inbounds double, double* [[ARRAY2:%.*]], i64 0
+; CHECK-NEXT: [[GEP_2_1:%.*]] = getelementptr inbounds double, double* [[ARRAY2]], i64 1
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast double* [[GEP_2_0]] to <2 x double>*
+; CHECK-NEXT: [[TMP3:%.*]] = load <2 x double>, <2 x double>* [[TMP2]], align 8
+; CHECK-NEXT: [[SHUFFLE:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> poison, <2 x i32> <i32 1, i32 0>
+; CHECK-NEXT: [[TMP4:%.*]] = fmul <2 x double> [[TMP1]], [[SHUFFLE]]
+; CHECK-NEXT: [[TMP5:%.*]] = extractelement <2 x double> [[SHUFFLE]], i32 1
+; CHECK-NEXT: [[TMP6:%.*]] = insertelement <2 x double> poison, double [[TMP5]], i32 0
+; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x double> [[SHUFFLE]], i32 0
+; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x double> [[TMP6]], double [[TMP7]], i32 1
+; CHECK-NEXT: [[TMP9:%.*]] = fmul <2 x double> [[TMP1]], [[TMP8]]
+; CHECK-NEXT: [[TMP10:%.*]] = fadd <2 x double> [[TMP4]], [[TMP9]]
+; CHECK-NEXT: [[TMP11:%.*]] = extractelement <2 x double> [[TMP10]], i32 0
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <2 x double> [[TMP10]], i32 1
+; CHECK-NEXT: [[ADD3:%.*]] = fadd double [[TMP11]], [[TMP12]]
+; CHECK-NEXT: ret double [[ADD3]]
+;
+entry:
+ %gep_1_0 = getelementptr inbounds double, double* %array1, i64 0
+ %gep_1_1 = getelementptr inbounds double, double* %array1, i64 1
+ %ld_1_0 = load double, double* %gep_1_0, align 8
+ %ld_1_1 = load double, double* %gep_1_1, align 8
+
+ %gep_2_0 = getelementptr inbounds double, double* %array2, i64 0
+ %gep_2_1 = getelementptr inbounds double, double* %array2, i64 1
+ %ld_2_0 = load double, double* %gep_2_0, align 8
+ %ld_2_1 = load double, double* %gep_2_1, align 8
+
+ %mul1 = fmul double %ld_1_0, %ld_2_0
+ %mul2 = fmul double %ld_1_1, %ld_2_0
+
+ %mul3 = fmul double %ld_1_0, %ld_2_1
+ %mul4 = fmul double %ld_1_1, %ld_2_1
+
+ %add1 = fadd double %mul1, %mul3
+ %add2 = fadd double %mul2, %mul4
+
+ %add3 = fadd double %add1, %add2
+ ret double %add3
+}
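
The SSE2 check lines added to the next file follow the autogenerated-assertion convention noted in its header (utils/update_test_checks.py). A hypothetical regeneration command, assuming a local build directory named build:

  python3 llvm/utils/update_test_checks.py --opt-binary=build/bin/opt \
      llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll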
diff --git a/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll b/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll
index 2ebf8d91f080f..51d564f0bfacc 100644
--- a/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll
+++ b/llvm/test/Transforms/SLPVectorizer/X86/operandorder.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basic-aa -slp-vectorizer -slp-threshold=-100 -instcombine -dce -S -mtriple=i386-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s
+; RUN: opt < %s -basic-aa -slp-vectorizer -slp-threshold=-100 -instcombine -dce -S -mtriple=i386-apple-macosx10.8.0 -mattr=+sse2 | FileCheck %s --check-prefix=SSE2
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128-n8:16:32-S128"
@@ -16,6 +17,16 @@ define void @shuffle_operands1(double * noalias %from, double * noalias %to, dou
; CHECK-NEXT: [[TMP6:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 4
; CHECK-NEXT: ret void
+;
+; SSE2-LABEL: @shuffle_operands1(
+; SSE2-NEXT: [[TMP1:%.*]] = bitcast double* [[FROM:%.*]] to <2 x double>*
+; SSE2-NEXT: [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 4
+; SSE2-NEXT: [[TMP3:%.*]] = insertelement <2 x double> poison, double [[V1:%.*]], i64 0
+; SSE2-NEXT: [[TMP4:%.*]] = insertelement <2 x double> [[TMP3]], double [[V2:%.*]], i64 1
+; SSE2-NEXT: [[TMP5:%.*]] = fadd <2 x double> [[TMP2]], [[TMP4]]
+; SSE2-NEXT: [[TMP6:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; SSE2-NEXT: store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 4
+; SSE2-NEXT: ret void
;
%from_1 = getelementptr double, double *%from, i64 1
%v0_1 = load double , double * %from
@@ -45,6 +56,22 @@ define void @vecload_vs_broadcast(double * noalias %from, double * noalias %to,
; CHECK: ext:
; CHECK-NEXT: ret void
;
+; SSE2-LABEL: @vecload_vs_broadcast(
+; SSE2-NEXT: entry:
+; SSE2-NEXT: br label [[LP:%.*]]
+; SSE2: lp:
+; SSE2-NEXT: [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; SSE2-NEXT: [[TMP0:%.*]] = bitcast double* [[FROM:%.*]] to <2 x double>*
+; SSE2-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 4
+; SSE2-NEXT: [[TMP2:%.*]] = insertelement <2 x double> poison, double [[P]], i64 0
+; SSE2-NEXT: [[TMP3:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> [[TMP1]], <2 x i32> <i32 0, i32 2>
+; SSE2-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[TMP1]], [[TMP3]]
+; SSE2-NEXT: [[TMP5:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; SSE2-NEXT: store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 4
+; SSE2-NEXT: br i1 undef, label [[LP]], label [[EXT:%.*]]
+; SSE2: ext:
+; SSE2-NEXT: ret void
+;
entry:
br label %lp
@@ -81,6 +108,22 @@ define void @vecload_vs_broadcast2(double * noalias %from, double * noalias %to,
; CHECK: ext:
; CHECK-NEXT: ret void
;
+; SSE2-LABEL: @vecload_vs_broadcast2(
+; SSE2-NEXT: entry:
+; SSE2-NEXT: br label [[LP:%.*]]
+; SSE2: lp:
+; SSE2-NEXT: [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; SSE2-NEXT: [[TMP0:%.*]] = bitcast double* [[FROM:%.*]] to <2 x double>*
+; SSE2-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 4
+; SSE2-NEXT: [[TMP2:%.*]] = insertelement <2 x double> poison, double [[P]], i64 0
+; SSE2-NEXT: [[TMP3:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> [[TMP1]], <2 x i32> <i32 0, i32 2>
+; SSE2-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[TMP3]], [[TMP1]]
+; SSE2-NEXT: [[TMP5:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; SSE2-NEXT: store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 4
+; SSE2-NEXT: br i1 undef, label [[LP]], label [[EXT:%.*]]
+; SSE2: ext:
+; SSE2-NEXT: ret void
+;
entry:
br label %lp
@@ -117,6 +160,22 @@ define void @vecload_vs_broadcast3(double * noalias %from, double * noalias %to,
; CHECK: ext:
; CHECK-NEXT: ret void
;
+; SSE2-LABEL: @vecload_vs_broadcast3(
+; SSE2-NEXT: entry:
+; SSE2-NEXT: br label [[LP:%.*]]
+; SSE2: lp:
+; SSE2-NEXT: [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; SSE2-NEXT: [[TMP0:%.*]] = bitcast double* [[FROM:%.*]] to <2 x double>*
+; SSE2-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 4
+; SSE2-NEXT: [[TMP2:%.*]] = insertelement <2 x double> poison, double [[P]], i64 0
+; SSE2-NEXT: [[TMP3:%.*]] = shufflevector <2 x double> [[TMP2]], <2 x double> [[TMP1]], <2 x i32> <i32 0, i32 2>
+; SSE2-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[TMP3]], [[TMP1]]
+; SSE2-NEXT: [[TMP5:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; SSE2-NEXT: store <2 x double> [[TMP4]], <2 x double>* [[TMP5]], align 4
+; SSE2-NEXT: br i1 undef, label [[LP]], label [[EXT:%.*]]
+; SSE2: ext:
+; SSE2-NEXT: ret void
+;
entry:
br label %lp
@@ -153,6 +212,22 @@ define void @shuffle_nodes_match1(double * noalias %from, double * noalias %to,
; CHECK: ext:
; CHECK-NEXT: ret void
;
+; SSE2-LABEL: @shuffle_nodes_match1(
+; SSE2-NEXT: entry:
+; SSE2-NEXT: br label [[LP:%.*]]
+; SSE2: lp:
+; SSE2-NEXT: [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; SSE2-NEXT: [[TMP0:%.*]] = bitcast double* [[FROM:%.*]] to <2 x double>*
+; SSE2-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 4
+; SSE2-NEXT: [[SHUFFLE:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <2 x i32> <i32 1, i32 0>
+; SSE2-NEXT: [[TMP2:%.*]] = insertelement <2 x double> [[TMP1]], double [[P]], i64 1
+; SSE2-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP2]], [[SHUFFLE]]
+; SSE2-NEXT: [[TMP4:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; SSE2-NEXT: store <2 x double> [[TMP3]], <2 x double>* [[TMP4]], align 4
+; SSE2-NEXT: br i1 undef, label [[LP]], label [[EXT:%.*]]
+; SSE2: ext:
+; SSE2-NEXT: ret void
+;
entry:
br label %lp
@@ -189,6 +264,22 @@ define void @vecload_vs_broadcast4(double * noalias %from, double * noalias %to,
; CHECK: ext:
; CHECK-NEXT: ret void
;
+; SSE2-LABEL: @vecload_vs_broadcast4(
+; SSE2-NEXT: entry:
+; SSE2-NEXT: br label [[LP:%.*]]
+; SSE2: lp:
+; SSE2-NEXT: [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; SSE2-NEXT: [[TMP0:%.*]] = bitcast double* [[FROM:%.*]] to <2 x double>*
+; SSE2-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 4
+; SSE2-NEXT: [[SHUFFLE:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <2 x i32> <i32 1, i32 0>
+; SSE2-NEXT: [[TMP2:%.*]] = insertelement <2 x double> [[TMP1]], double [[P]], i64 1
+; SSE2-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP2]], [[SHUFFLE]]
+; SSE2-NEXT: [[TMP4:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; SSE2-NEXT: store <2 x double> [[TMP3]], <2 x double>* [[TMP4]], align 4
+; SSE2-NEXT: br i1 undef, label [[LP]], label [[EXT:%.*]]
+; SSE2: ext:
+; SSE2-NEXT: ret void
+;
entry:
br label %lp
@@ -226,6 +317,22 @@ define void @shuffle_nodes_match2(double * noalias %from, double * noalias %to,
; CHECK: ext:
; CHECK-NEXT: ret void
;
+; SSE2-LABEL: @shuffle_nodes_match2(
+; SSE2-NEXT: entry:
+; SSE2-NEXT: br label [[LP:%.*]]
+; SSE2: lp:
+; SSE2-NEXT: [[P:%.*]] = phi double [ 1.000000e+00, [[LP]] ], [ 0.000000e+00, [[ENTRY:%.*]] ]
+; SSE2-NEXT: [[TMP0:%.*]] = bitcast double* [[FROM:%.*]] to <2 x double>*
+; SSE2-NEXT: [[TMP1:%.*]] = load <2 x double>, <2 x double>* [[TMP0]], align 4
+; SSE2-NEXT: [[SHUFFLE:%.*]] = shufflevector <2 x double> [[TMP1]], <2 x double> poison, <2 x i32> <i32 1, i32 0>
+; SSE2-NEXT: [[TMP2:%.*]] = insertelement <2 x double> [[TMP1]], double [[P]], i64 1
+; SSE2-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[SHUFFLE]], [[TMP2]]
+; SSE2-NEXT: [[TMP4:%.*]] = bitcast double* [[TO:%.*]] to <2 x double>*
+; SSE2-NEXT: store <2 x double> [[TMP3]], <2 x double>* [[TMP4]], align 4
+; SSE2-NEXT: br i1 undef, label [[LP]], label [[EXT:%.*]]
+; SSE2: ext:
+; SSE2-NEXT: ret void
+;
entry:
br label %lp
@@ -288,6 +395,43 @@ define void @good_load_order() {
; CHECK: for.end:
; CHECK-NEXT: ret void
;
+; SSE2-LABEL: @good_load_order(
+; SSE2-NEXT: entry:
+; SSE2-NEXT: br label [[FOR_COND1_PREHEADER:%.*]]
+; SSE2: for.cond1.preheader:
+; SSE2-NEXT: [[TMP0:%.*]] = load float, float* getelementptr inbounds ([32000 x float], [32000 x float]* @a, i32 0, i32 0), align 16
+; SSE2-NEXT: br label [[FOR_BODY3:%.*]]
+; SSE2: for.body3:
+; SSE2-NEXT: [[TMP1:%.*]] = phi float [ [[TMP0]], [[FOR_COND1_PREHEADER]] ], [ [[TMP14:%.*]], [[FOR_BODY3]] ]
+; SSE2-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_COND1_PREHEADER]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY3]] ]
+; SSE2-NEXT: [[TMP2:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; SSE2-NEXT: [[TMP3:%.*]] = add i32 [[TMP2]], 1
+; SSE2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [32000 x float], [32000 x float]* @a, i32 0, i32 [[TMP3]]
+; SSE2-NEXT: [[TMP4:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; SSE2-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [32000 x float], [32000 x float]* @a, i32 0, i32 [[TMP4]]
+; SSE2-NEXT: [[TMP5:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; SSE2-NEXT: [[TMP6:%.*]] = add i32 [[TMP5]], 4
+; SSE2-NEXT: [[ARRAYIDX31:%.*]] = getelementptr inbounds [32000 x float], [32000 x float]* @a, i32 0, i32 [[TMP6]]
+; SSE2-NEXT: [[TMP7:%.*]] = bitcast float* [[ARRAYIDX]] to <4 x float>*
+; SSE2-NEXT: [[TMP8:%.*]] = load <4 x float>, <4 x float>* [[TMP7]], align 4
+; SSE2-NEXT: [[TMP9:%.*]] = insertelement <4 x float> poison, float [[TMP1]], i64 0
+; SSE2-NEXT: [[TMP10:%.*]] = shufflevector <4 x float> [[TMP9]], <4 x float> [[TMP8]], <4 x i32> <i32 0, i32 4, i32 5, i32 6>
+; SSE2-NEXT: [[TMP11:%.*]] = fmul <4 x float> [[TMP8]], [[TMP10]]
+; SSE2-NEXT: [[TMP12:%.*]] = bitcast float* [[ARRAYIDX5]] to <4 x float>*
+; SSE2-NEXT: store <4 x float> [[TMP11]], <4 x float>* [[TMP12]], align 4
+; SSE2-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 5
+; SSE2-NEXT: [[TMP13:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; SSE2-NEXT: [[ARRAYIDX41:%.*]] = getelementptr inbounds [32000 x float], [32000 x float]* @a, i32 0, i32 [[TMP13]]
+; SSE2-NEXT: [[TMP14]] = load float, float* [[ARRAYIDX41]], align 4
+; SSE2-NEXT: [[TMP15:%.*]] = extractelement <4 x float> [[TMP8]], i64 3
+; SSE2-NEXT: [[MUL45:%.*]] = fmul float [[TMP14]], [[TMP15]]
+; SSE2-NEXT: store float [[MUL45]], float* [[ARRAYIDX31]], align 4
+; SSE2-NEXT: [[TMP16:%.*]] = trunc i64 [[INDVARS_IV_NEXT]] to i32
+; SSE2-NEXT: [[CMP2:%.*]] = icmp slt i32 [[TMP16]], 31995
+; SSE2-NEXT: br i1 [[CMP2]], label [[FOR_BODY3]], label [[FOR_END:%.*]]
+; SSE2: for.end:
+; SSE2-NEXT: ret void
+;
entry:
br label %for.cond1.preheader
@@ -346,6 +490,16 @@ define void @load_reorder_double(double* nocapture %c, double* noalias nocapture
; CHECK-NEXT: [[TMP6:%.*]] = bitcast double* [[C:%.*]] to <2 x double>*
; CHECK-NEXT: store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 4
; CHECK-NEXT: ret void
+;
+; SSE2-LABEL: @load_reorder_double(
+; SSE2-NEXT: [[TMP1:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
+; SSE2-NEXT: [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 4
+; SSE2-NEXT: [[TMP3:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
+; SSE2-NEXT: [[TMP4:%.*]] = load <2 x double>, <2 x double>* [[TMP3]], align 4
+; SSE2-NEXT: [[TMP5:%.*]] = fadd <2 x double> [[TMP2]], [[TMP4]]
+; SSE2-NEXT: [[TMP6:%.*]] = bitcast double* [[C:%.*]] to <2 x double>*
+; SSE2-NEXT: store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 4
+; SSE2-NEXT: ret void
;
%1 = load double, double* %a
%2 = load double, double* %b
@@ -377,6 +531,16 @@ define void @load_reorder_float(float* nocapture %c, float* noalias nocapture re
; CHECK-NEXT: [[TMP6:%.*]] = bitcast float* [[C:%.*]] to <4 x float>*
; CHECK-NEXT: store <4 x float> [[TMP5]], <4 x float>* [[TMP6]], align 4
; CHECK-NEXT: ret void
+;
+; SSE2-LABEL: @load_reorder_float(
+; SSE2-NEXT: [[TMP1:%.*]] = bitcast float* [[A:%.*]] to <4 x float>*
+; SSE2-NEXT: [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
+; SSE2-NEXT: [[TMP3:%.*]] = bitcast float* [[B:%.*]] to <4 x float>*
+; SSE2-NEXT: [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[TMP3]], align 4
+; SSE2-NEXT: [[TMP5:%.*]] = fadd <4 x float> [[TMP2]], [[TMP4]]
+; SSE2-NEXT: [[TMP6:%.*]] = bitcast float* [[C:%.*]] to <4 x float>*
+; SSE2-NEXT: store <4 x float> [[TMP5]], <4 x float>* [[TMP6]], align 4
+; SSE2-NEXT: ret void
;
%1 = load float, float* %a
%2 = load float, float* %b
@@ -425,6 +589,19 @@ define void @opcode_reorder(float* noalias nocapture %a, float* noalias nocaptur
; CHECK-NEXT: [[TMP9:%.*]] = bitcast float* [[A:%.*]] to <4 x float>*
; CHECK-NEXT: store <4 x float> [[TMP8]], <4 x float>* [[TMP9]], align 4
; CHECK-NEXT: ret void
+;
+; SSE2-LABEL: @opcode_reorder(
+; SSE2-NEXT: [[TMP1:%.*]] = bitcast float* [[B:%.*]] to <4 x float>*
+; SSE2-NEXT: [[TMP2:%.*]] = load <4 x float>, <4 x float>* [[TMP1]], align 4
+; SSE2-NEXT: [[TMP3:%.*]] = bitcast float* [[C:%.*]] to <4 x float>*
+; SSE2-NEXT: [[TMP4:%.*]] = load <4 x float>, <4 x float>* [[TMP3]], align 4
+; SSE2-NEXT: [[TMP5:%.*]] = fadd <4 x float> [[TMP2]], [[TMP4]]
+; SSE2-NEXT: [[TMP6:%.*]] = bitcast float* [[D:%.*]] to <4 x float>*
+; SSE2-NEXT: [[TMP7:%.*]] = load <4 x float>, <4 x float>* [[TMP6]], align 4
+; SSE2-NEXT: [[TMP8:%.*]] = fadd <4 x float> [[TMP7]], [[TMP5]]
+; SSE2-NEXT: [[TMP9:%.*]] = bitcast float* [[A:%.*]] to <4 x float>*
+; SSE2-NEXT: store <4 x float> [[TMP8]], <4 x float>* [[TMP9]], align 4
+; SSE2-NEXT: ret void
;
%1 = load float, float* %b
%2 = load float, float* %c