[llvm] b339bbd - [Matrix] Use ArrayType for allocas instead of VectorType.
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Fri Jan 28 02:48:09 PST 2022
Author: Florian Hahn
Date: 2022-01-28T10:47:52Z
New Revision: b339bbdb197052ec4a346d95e74cdffa17469904
URL: https://github.com/llvm/llvm-project/commit/b339bbdb197052ec4a346d95e74cdffa17469904
DIFF: https://github.com/llvm/llvm-project/commit/b339bbdb197052ec4a346d95e74cdffa17469904.diff
LOG: [Matrix] Use ArrayType for allocas instead of VectorType.
When creating an alloca to copy a matrix due to memory conflicts, the
pass used VectorTypes for those allocas, which forced them to have huge
alignments for large vectors.
This patch updates LowerMatrixIntrinsics to use a corresponding array
type, like Clang already does, to get more manageable alignments.
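The effect of the type choice shows up in DataLayout's preferred
alignments. The following standalone C++ sketch (not part of the patch;
the data layout string and the printed values assume a typical x86_64
target) contrasts the preferred alignment of a <16 x double> vector with
that of a [16 x double] array, matching the align 128 -> align 8 changes
in the regenerated tests below.

  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/DerivedTypes.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace llvm;

  int main() {
    LLVMContext Ctx;
    // Assumed x86_64 data layout string; alignments are target-dependent.
    DataLayout DL("e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-"
                  "n8:16:32:64-S128");

    Type *DblTy = Type::getDoubleTy(Ctx);
    auto *VecTy = FixedVectorType::get(DblTy, 16); // <16 x double>
    auto *ArrTy = ArrayType::get(DblTy, 16);       // [16 x double]

    // The vector's preferred alignment scales with its size (128 bytes
    // here), while the array keeps the element alignment (8 bytes).
    errs() << "vector align: " << DL.getPrefTypeAlign(VecTy).value() << "\n";
    errs() << "array align:  " << DL.getPrefTypeAlign(ArrTy).value() << "\n";
    return 0;
  }

With the array-typed alloca, only the element alignment is required, and
the pass bitcasts the result back to the vector pointer type for the
existing users, as the change below does.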
Reviewed By: anemet, thegameg
Differential Revision: https://reviews.llvm.org/D118239
Added:
Modified:
llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-dominance.ll
llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-loops.ll
llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-multiple-blocks.ll
llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index 8f1d0181ee5b..296becb31e8f 100644
--- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -1339,16 +1339,21 @@ class LowerMatrixIntrinsics {
// Copy load operand to new alloca.
Builder.SetInsertPoint(Copy, Copy->begin());
- AllocaInst *NewLd =
- Builder.CreateAlloca(Load->getType(), Load->getPointerAddressSpace());
- Builder.CreateMemCpy(NewLd, NewLd->getAlign(),
- Load->getPointerOperand(), Load->getAlign(),
- LoadLoc.Size.getValue());
+ auto *VT = cast<FixedVectorType>(Load->getType());
+ // Use an array type for the alloca, to avoid potentially huge alignment
+ // requirements for large vector types.
+ auto *ArrayTy = ArrayType::get(VT->getElementType(), VT->getNumElements());
+ AllocaInst *Alloca =
+ Builder.CreateAlloca(ArrayTy, Load->getPointerAddressSpace());
+ Value *BC = Builder.CreateBitCast(Alloca, VT->getPointerTo());
+
+ Builder.CreateMemCpy(BC, Alloca->getAlign(), Load->getPointerOperand(),
+ Load->getAlign(), LoadLoc.Size.getValue());
Builder.SetInsertPoint(Fusion, Fusion->begin());
PHINode *PHI = Builder.CreatePHI(Load->getPointerOperandType(), 3);
PHI->addIncoming(Load->getPointerOperand(), Check0);
PHI->addIncoming(Load->getPointerOperand(), Check1);
- PHI->addIncoming(NewLd, Copy);
+ PHI->addIncoming(BC, Copy);
// Adjust DT.
DTUpdates.push_back({DT->Insert, Check0, Check1});
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-dominance.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-dominance.ll
index bfff576d21e6..289cd8933410 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-dominance.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-dominance.ll
@@ -20,10 +20,10 @@ define void @multiply_can_hoist_cast(<4 x double>* noalias %A, <4 x double> * %B
; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[LOAD_END]], [[STORE_BEGIN]]
; CHECK-NEXT: br i1 [[TMP1]], label [[COPY:%.*]], label [[NO_ALIAS]]
; CHECK: copy:
-; CHECK-NEXT: [[TMP2:%.*]] = alloca <4 x double>, align 32
+; CHECK-NEXT: [[TMP2:%.*]] = alloca <4 x double>, align 8
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x double>* [[TMP2]] to i8*
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x double>* [[B]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 32 dereferenceable(32) [[TMP3]], i8* noundef nonnull align 8 dereferenceable(32) [[TMP4]], i64 32, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(32) [[TMP3]], i8* noundef nonnull align 8 dereferenceable(32) [[TMP4]], i64 32, i1 false)
; CHECK-NEXT: br label [[NO_ALIAS]]
; CHECK: no_alias:
; CHECK-NEXT: [[TMP5:%.*]] = phi <4 x double>* [ [[B]], [[ENTRY:%.*]] ], [ [[B]], [[ALIAS_CONT]] ], [ [[TMP2]], [[COPY]] ]
@@ -115,10 +115,10 @@ define void @multiply_can_hoist_multiple_insts(<4 x double>* noalias %A, <4 x do
; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[LOAD_END]], [[STORE_BEGIN]]
; CHECK-NEXT: br i1 [[TMP1]], label [[COPY:%.*]], label [[NO_ALIAS]]
; CHECK: copy:
-; CHECK-NEXT: [[TMP2:%.*]] = alloca <4 x double>, align 32
+; CHECK-NEXT: [[TMP2:%.*]] = alloca <4 x double>, align 8
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x double>* [[TMP2]] to i8*
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x double>* [[B]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 32 dereferenceable(32) [[TMP3]], i8* noundef nonnull align 8 dereferenceable(32) [[TMP4]], i64 32, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(32) [[TMP3]], i8* noundef nonnull align 8 dereferenceable(32) [[TMP4]], i64 32, i1 false)
; CHECK-NEXT: br label [[NO_ALIAS]]
; CHECK: no_alias:
; CHECK-NEXT: [[TMP5:%.*]] = phi <4 x double>* [ [[B]], [[ENTRY:%.*]] ], [ [[B]], [[ALIAS_CONT]] ], [ [[TMP2]], [[COPY]] ]
@@ -212,10 +212,10 @@ define void @multiply_can_hoist_multiple_insts2(<4 x double>* noalias %A, <4 x d
; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[LOAD_END]], [[STORE_BEGIN]]
; CHECK-NEXT: br i1 [[TMP1]], label [[COPY:%.*]], label [[NO_ALIAS]]
; CHECK: copy:
-; CHECK-NEXT: [[TMP2:%.*]] = alloca <4 x double>, align 32
+; CHECK-NEXT: [[TMP2:%.*]] = alloca <4 x double>, align 8
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x double>* [[TMP2]] to i8*
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x double>* [[B]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 32 dereferenceable(32) [[TMP3]], i8* noundef nonnull align 8 dereferenceable(32) [[TMP4]], i64 32, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(32) [[TMP3]], i8* noundef nonnull align 8 dereferenceable(32) [[TMP4]], i64 32, i1 false)
; CHECK-NEXT: br label [[NO_ALIAS]]
; CHECK: no_alias:
; CHECK-NEXT: [[TMP5:%.*]] = phi <4 x double>* [ [[B]], [[ENTRY:%.*]] ], [ [[B]], [[ALIAS_CONT]] ], [ [[TMP2]], [[COPY]] ]
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-loops.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-loops.ll
index 070bf1718d5b..08eb3398cdc6 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-loops.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-loops.ll
@@ -286,10 +286,10 @@ define void @multiply_alias_2x2(<4 x float>* %A, <4 x float>* %B, <4 x float>* %
; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[LOAD_END]], [[STORE_BEGIN]]
; CHECK-NEXT: br i1 [[TMP1]], label [[COPY:%.*]], label [[NO_ALIAS]]
; CHECK: copy:
-; CHECK-NEXT: [[TMP2:%.*]] = alloca <4 x float>, align 16
+; CHECK-NEXT: [[TMP2:%.*]] = alloca <4 x float>, align 4
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <4 x float>* [[TMP2]] to i8*
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <4 x float>* [[A]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(16) [[TMP3]], i8* noundef nonnull align 8 dereferenceable(16) [[TMP4]], i64 16, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) [[TMP3]], i8* noundef nonnull align 8 dereferenceable(16) [[TMP4]], i64 16, i1 false)
; CHECK-NEXT: br label [[NO_ALIAS]]
; CHECK: no_alias:
; CHECK-NEXT: [[TMP5:%.*]] = phi <4 x float>* [ [[A]], [[ENTRY:%.*]] ], [ [[A]], [[ALIAS_CONT]] ], [ [[TMP2]], [[COPY]] ]
@@ -303,10 +303,10 @@ define void @multiply_alias_2x2(<4 x float>* %A, <4 x float>* %B, <4 x float>* %
; CHECK-NEXT: [[TMP7:%.*]] = icmp ugt i64 [[LOAD_END7]], [[STORE_BEGIN4]]
; CHECK-NEXT: br i1 [[TMP7]], label [[COPY2:%.*]], label [[NO_ALIAS3]]
; CHECK: copy2:
-; CHECK-NEXT: [[TMP8:%.*]] = alloca <4 x float>, align 16
+; CHECK-NEXT: [[TMP8:%.*]] = alloca <4 x float>, align 4
; CHECK-NEXT: [[TMP9:%.*]] = bitcast <4 x float>* [[TMP8]] to i8*
; CHECK-NEXT: [[TMP10:%.*]] = bitcast <4 x float>* [[B]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 16 dereferenceable(16) [[TMP9]], i8* noundef nonnull align 8 dereferenceable(16) [[TMP10]], i64 16, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 4 dereferenceable(16) [[TMP9]], i8* noundef nonnull align 8 dereferenceable(16) [[TMP10]], i64 16, i1 false)
; CHECK-NEXT: br label [[NO_ALIAS3]]
; CHECK: no_alias3:
; CHECK-NEXT: [[TMP11:%.*]] = phi <4 x float>* [ [[B]], [[NO_ALIAS]] ], [ [[B]], [[ALIAS_CONT1]] ], [ [[TMP8]], [[COPY2]] ]
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-multiple-blocks.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-multiple-blocks.ll
index dfcb2954dfb6..2e9657d8acc6 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-multiple-blocks.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-multiple-blocks.ll
@@ -32,13 +32,14 @@ define void @test(<6 x double> * %A, <6 x double> * %B, <9 x double>* %C, i1 %co
; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[LOAD_END]], [[STORE_BEGIN]]
; CHECK-NEXT: br i1 [[TMP1]], label [[COPY:%.*]], label [[NO_ALIAS]]
; CHECK: copy:
-; CHECK-NEXT: [[TMP2:%.*]] = alloca <6 x double>, align 64
-; CHECK-NEXT: [[TMP3:%.*]] = bitcast <6 x double>* [[TMP2]] to i8*
+; CHECK-NEXT: [[TMP2:%.*]] = alloca [6 x double], align 8
+; CHECK-NEXT: [[BC:%.+]] = bitcast [6 x double]* [[TMP2]] to <6 x double>*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast [6 x double]* [[TMP2]] to i8*
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <6 x double>* [[A]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 64 dereferenceable(48) [[TMP3]], i8* noundef nonnull align 8 dereferenceable(48) [[TMP4]], i64 48, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(48) [[TMP3]], i8* noundef nonnull align 8 dereferenceable(48) [[TMP4]], i64 48, i1 false)
; CHECK-NEXT: br label [[NO_ALIAS]]
; CHECK: no_alias:
-; CHECK-NEXT: [[TMP5:%.*]] = phi <6 x double>* [ [[A]], [[ENTRY:%.*]] ], [ [[A]], [[ALIAS_CONT]] ], [ [[TMP2]], [[COPY]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = phi <6 x double>* [ [[A]], [[ENTRY:%.*]] ], [ [[A]], [[ALIAS_CONT]] ], [ [[BC]], [[COPY]] ]
; CHECK-NEXT: [[STORE_BEGIN4:%.*]] = ptrtoint <9 x double>* [[C]] to i64
; CHECK-NEXT: [[STORE_END5:%.*]] = add nuw nsw i64 [[STORE_BEGIN4]], 72
; CHECK-NEXT: [[LOAD_BEGIN6:%.*]] = ptrtoint <6 x double>* [[B]] to i64
@@ -49,13 +50,14 @@ define void @test(<6 x double> * %A, <6 x double> * %B, <9 x double>* %C, i1 %co
; CHECK-NEXT: [[TMP7:%.*]] = icmp ugt i64 [[LOAD_END7]], [[STORE_BEGIN4]]
; CHECK-NEXT: br i1 [[TMP7]], label [[COPY2:%.*]], label [[NO_ALIAS3]]
; CHECK: copy2:
-; CHECK-NEXT: [[TMP8:%.*]] = alloca <6 x double>, align 64
-; CHECK-NEXT: [[TMP9:%.*]] = bitcast <6 x double>* [[TMP8]] to i8*
+; CHECK-NEXT: [[TMP8:%.*]] = alloca [6 x double], align 8
+; CHECK-NEXT: [[BC2:%.+]] = bitcast [6 x double]* [[TMP8]] to <6 x double>*
+; CHECK-NEXT: [[TMP9:%.*]] = bitcast [6 x double]* [[TMP8]] to i8*
; CHECK-NEXT: [[TMP10:%.*]] = bitcast <6 x double>* [[B]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 64 dereferenceable(48) [[TMP9]], i8* noundef nonnull align 8 dereferenceable(48) [[TMP10]], i64 48, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(48) [[TMP9]], i8* noundef nonnull align 8 dereferenceable(48) [[TMP10]], i64 48, i1 false)
; CHECK-NEXT: br label [[NO_ALIAS3]]
; CHECK: no_alias3:
-; CHECK-NEXT: [[TMP11:%.*]] = phi <6 x double>* [ [[B]], [[NO_ALIAS]] ], [ [[B]], [[ALIAS_CONT1]] ], [ [[TMP8]], [[COPY2]] ]
+; CHECK-NEXT: [[TMP11:%.*]] = phi <6 x double>* [ [[B]], [[NO_ALIAS]] ], [ [[B]], [[ALIAS_CONT1]] ], [ [[BC2]], [[COPY2]] ]
; CHECK-NEXT: [[VEC_CAST:%.*]] = bitcast <6 x double>* [[TMP5]] to <2 x double>*
; CHECK-NEXT: [[COL_LOAD:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST]], align 8
; CHECK-NEXT: [[VEC_GEP:%.*]] = getelementptr <6 x double>, <6 x double>* [[TMP5]], i64 0, i64 3
@@ -169,13 +171,14 @@ define void @test(<6 x double> * %A, <6 x double> * %B, <9 x double>* %C, i1 %co
; CHECK-NEXT: [[TMP37:%.*]] = icmp ugt i64 [[LOAD_END97]], [[STORE_BEGIN94]]
; CHECK-NEXT: br i1 [[TMP37]], label [[COPY92:%.*]], label [[NO_ALIAS93]]
; CHECK: copy92:
-; CHECK-NEXT: [[TMP38:%.*]] = alloca <6 x double>, align 64
-; CHECK-NEXT: [[TMP39:%.*]] = bitcast <6 x double>* [[TMP38]] to i8*
+; CHECK-NEXT: [[TMP38:%.*]] = alloca [6 x double], align 8
+; CHECK-NEXT: [[BC3:%.+]] = bitcast [6 x double]* [[TMP38]] to <6 x double>*
+; CHECK-NEXT: [[TMP39:%.*]] = bitcast [6 x double]* [[TMP38]] to i8*
; CHECK-NEXT: [[TMP40:%.*]] = bitcast <6 x double>* [[A]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 64 dereferenceable(48) [[TMP39]], i8* noundef nonnull align 8 dereferenceable(48) [[TMP40]], i64 48, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(48) [[TMP39]], i8* noundef nonnull align 8 dereferenceable(48) [[TMP40]], i64 48, i1 false)
; CHECK-NEXT: br label [[NO_ALIAS93]]
; CHECK: no_alias93:
-; CHECK-NEXT: [[TMP41:%.*]] = phi <6 x double>* [ [[A]], [[END]] ], [ [[A]], [[ALIAS_CONT91]] ], [ [[TMP38]], [[COPY92]] ]
+; CHECK-NEXT: [[TMP41:%.*]] = phi <6 x double>* [ [[A]], [[END]] ], [ [[A]], [[ALIAS_CONT91]] ], [ [[BC3]], [[COPY92]] ]
; CHECK-NEXT: [[STORE_BEGIN101:%.*]] = ptrtoint <9 x double>* [[C]] to i64
; CHECK-NEXT: [[STORE_END102:%.*]] = add nuw nsw i64 [[STORE_BEGIN101]], 72
; CHECK-NEXT: [[LOAD_BEGIN103:%.*]] = ptrtoint <6 x double>* [[B]] to i64
@@ -186,13 +189,14 @@ define void @test(<6 x double> * %A, <6 x double> * %B, <9 x double>* %C, i1 %co
; CHECK-NEXT: [[TMP43:%.*]] = icmp ugt i64 [[LOAD_END104]], [[STORE_BEGIN101]]
; CHECK-NEXT: br i1 [[TMP43]], label [[COPY99:%.*]], label [[NO_ALIAS100]]
; CHECK: copy99:
-; CHECK-NEXT: [[TMP44:%.*]] = alloca <6 x double>, align 64
-; CHECK-NEXT: [[TMP45:%.*]] = bitcast <6 x double>* [[TMP44]] to i8*
+; CHECK-NEXT: [[TMP44:%.*]] = alloca [6 x double], align 8
+; CHECK-NEXT: [[BC4:%.+]] = bitcast [6 x double]* [[TMP44]] to <6 x double>*
+; CHECK-NEXT: [[TMP45:%.*]] = bitcast [6 x double]* [[TMP44]] to i8*
; CHECK-NEXT: [[TMP46:%.*]] = bitcast <6 x double>* [[B]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 64 dereferenceable(48) [[TMP45]], i8* noundef nonnull align 8 dereferenceable(48) [[TMP46]], i64 48, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(48) [[TMP45]], i8* noundef nonnull align 8 dereferenceable(48) [[TMP46]], i64 48, i1 false)
; CHECK-NEXT: br label [[NO_ALIAS100]]
; CHECK: no_alias100:
-; CHECK-NEXT: [[TMP47:%.*]] = phi <6 x double>* [ [[B]], [[NO_ALIAS93]] ], [ [[B]], [[ALIAS_CONT98]] ], [ [[TMP44]], [[COPY99]] ]
+; CHECK-NEXT: [[TMP47:%.*]] = phi <6 x double>* [ [[B]], [[NO_ALIAS93]] ], [ [[B]], [[ALIAS_CONT98]] ], [ [[BC4]], [[COPY99]] ]
; CHECK-NEXT: [[VEC_CAST106:%.*]] = bitcast <6 x double>* [[TMP41]] to <2 x double>*
; CHECK-NEXT: [[COL_LOAD107:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST106]], align 8
; CHECK-NEXT: [[VEC_GEP108:%.*]] = getelementptr <6 x double>, <6 x double>* [[TMP41]], i64 0, i64 3
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused.ll
index 457edfaed06e..569fe4a55887 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused.ll
@@ -22,10 +22,10 @@ define void @multiply(<16 x double> * %A, <16 x double> * %B, <16 x double>* %C)
; CHECK-NEXT: [[TMP1:%.*]] = icmp ugt i64 [[LOAD_END]], [[STORE_BEGIN]]
; CHECK-NEXT: br i1 [[TMP1]], label [[COPY:%.*]], label [[NO_ALIAS]]
; CHECK: copy:
-; CHECK-NEXT: [[TMP2:%.*]] = alloca <16 x double>, align 128
+; CHECK-NEXT: [[TMP2:%.*]] = alloca <16 x double>, align 8
; CHECK-NEXT: [[TMP3:%.*]] = bitcast <16 x double>* [[TMP2]] to i8*
; CHECK-NEXT: [[TMP4:%.*]] = bitcast <16 x double>* [[A]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 128 dereferenceable(128) [[TMP3]], i8* noundef nonnull align 8 dereferenceable(128) [[TMP4]], i64 128, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(128) [[TMP3]], i8* noundef nonnull align 8 dereferenceable(128) [[TMP4]], i64 128, i1 false)
; CHECK-NEXT: br label [[NO_ALIAS]]
; CHECK: no_alias:
; CHECK-NEXT: [[TMP5:%.*]] = phi <16 x double>* [ [[A]], [[ENTRY:%.*]] ], [ [[A]], [[ALIAS_CONT]] ], [ [[TMP2]], [[COPY]] ]
@@ -39,10 +39,10 @@ define void @multiply(<16 x double> * %A, <16 x double> * %B, <16 x double>* %C)
; CHECK-NEXT: [[TMP7:%.*]] = icmp ugt i64 [[LOAD_END7]], [[STORE_BEGIN4]]
; CHECK-NEXT: br i1 [[TMP7]], label [[COPY2:%.*]], label [[NO_ALIAS3]]
; CHECK: copy2:
-; CHECK-NEXT: [[TMP8:%.*]] = alloca <16 x double>, align 128
+; CHECK-NEXT: [[TMP8:%.*]] = alloca <16 x double>, align 8
; CHECK-NEXT: [[TMP9:%.*]] = bitcast <16 x double>* [[TMP8]] to i8*
; CHECK-NEXT: [[TMP10:%.*]] = bitcast <16 x double>* [[B]] to i8*
-; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 128 dereferenceable(128) [[TMP9]], i8* noundef nonnull align 8 dereferenceable(128) [[TMP10]], i64 128, i1 false)
+; CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* noundef nonnull align 8 dereferenceable(128) [[TMP9]], i8* noundef nonnull align 8 dereferenceable(128) [[TMP10]], i64 128, i1 false)
; CHECK-NEXT: br label [[NO_ALIAS3]]
; CHECK: no_alias3:
; CHECK-NEXT: [[TMP11:%.*]] = phi <16 x double>* [ [[B]], [[NO_ALIAS]] ], [ [[B]], [[ALIAS_CONT1]] ], [ [[TMP8]], [[COPY2]] ]