[llvm] e02c964 - [Matrix] Specify missing alignment in tests (NFC).

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 16 07:45:31 PDT 2020


Author: Florian Hahn
Date: 2020-06-16T15:37:35+01:00
New Revision: e02c9649699827cb7189d8662b42a829339c6ec0

URL: https://github.com/llvm/llvm-project/commit/e02c9649699827cb7189d8662b42a829339c6ec0
DIFF: https://github.com/llvm/llvm-project/commit/e02c9649699827cb7189d8662b42a829339c6ec0.diff

LOG: [Matrix] Specify missing alignment in tests (NFC).

Some tests were missing alignment info. Subsequent changes properly
preserve the set alignment. Set it properly beforehand, to avoid
unnecessary test changes.

It also updates cases where an alignment of 16 was specified, instead of
the vector element type alignment.

Added: 
    

Modified: 
    llvm/test/Transforms/LowerMatrixIntrinsics/bigger-expressions-double.ll
    llvm/test/Transforms/LowerMatrixIntrinsics/multiply-add-sub-double-row-major.ll
    llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-multiple-blocks.ll
    llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused.ll
    llvm/test/Transforms/LowerMatrixIntrinsics/propagate-backward.ll
    llvm/test/Transforms/LowerMatrixIntrinsics/propagate-multiple-iterations.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/bigger-expressions-double.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/bigger-expressions-double.ll
index b72ecf5c0e4c..4ad6cc0fd6d6 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/bigger-expressions-double.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/bigger-expressions-double.ll
@@ -237,11 +237,11 @@ define void @transpose_multiply(<9 x double>* %A.Ptr, <9 x double>* %B.Ptr, <9 x
 ;
 
 entry:
-  %a = load <9 x double>, <9 x double>* %A.Ptr
-  %b = load <9 x double>, <9 x double>* %B.Ptr
+  %a = load <9 x double>, <9 x double>* %A.Ptr, align 8
+  %b = load <9 x double>, <9 x double>* %B.Ptr, align 8
   %a.trans  = call <9 x double> @llvm.matrix.transpose(<9 x double> %a, i32 3, i32 3)
   %c = call <9 x double> @llvm.matrix.multiply.v9f64.v9f64.v9f64(<9 x double> %a.trans, <9 x double> %b, i32 3, i32 3, i32 3)
-  store <9 x double> %c, <9 x double>* %C.Ptr
+  store <9 x double> %c, <9 x double>* %C.Ptr, align 8
   ret void
 }
 
@@ -501,13 +501,13 @@ define void @transpose_multiply_add(<9 x double>* %A.Ptr, <9 x double>* %B.Ptr,
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %a = load <9 x double>, <9 x double>* %A.Ptr
-  %b = load <9 x double>, <9 x double>* %B.Ptr
+  %a = load <9 x double>, <9 x double>* %A.Ptr, align 8
+  %b = load <9 x double>, <9 x double>* %B.Ptr, align 8
   %a.trans  = call <9 x double> @llvm.matrix.transpose(<9 x double> %a, i32 3, i32 3)
   %mult = call <9 x double> @llvm.matrix.multiply.v9f64.v9f64.v9f64(<9 x double> %a.trans, <9 x double> %b, i32 3, i32 3, i32 3)
-  %c = load <9 x double>, <9 x double>* %C.Ptr
+  %c = load <9 x double>, <9 x double>* %C.Ptr, align 8
   %res = fadd <9 x double> %c, %mult
 
-  store <9 x double> %res, <9 x double>* %C.Ptr
+  store <9 x double> %res, <9 x double>* %C.Ptr, align 8
   ret void
 }

diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-add-sub-double-row-major.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-add-sub-double-row-major.ll
index 4672c57f9e4f..3b12831a3314 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-add-sub-double-row-major.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-add-sub-double-row-major.ll
@@ -136,16 +136,16 @@ define void @multiply_sub_add_2x3_3x2(<6 x double>* %a.ptr, <6 x double>* %b.ptr
 ; RM-NEXT:    ret void
 ;
 entry:
-  %a = load <6 x double>, <6 x double>* %a.ptr
-  %b = load <6 x double>, <6 x double>* %b.ptr
+  %a = load <6 x double>, <6 x double>* %a.ptr, align 8
+  %b = load <6 x double>, <6 x double>* %b.ptr, align 8
   %add = fadd <6 x double> %a, %a
-  store <6 x double> %add, <6 x double>* %a.ptr
+  store <6 x double> %add, <6 x double>* %a.ptr, align 8
   %sub = fsub <6 x double> %b, <double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0>
-  store <6 x double> %sub, <6 x double>* %b.ptr
+  store <6 x double> %sub, <6 x double>* %b.ptr, align 8
   %mul = call <4 x double> @llvm.matrix.multiply.v4f64.v6f64.v6f64(<6 x double> %add, <6 x double> %sub, i32 2, i32 3, i32 2)
-  %c = load <4 x double>, <4 x double>* %c.ptr
+  %c = load <4 x double>, <4 x double>* %c.ptr, align 8
   %res = fsub <4 x double> %c, %mul
-  store <4 x double> %res, <4 x double>* %c.ptr
+  store <4 x double> %res, <4 x double>* %c.ptr, align 8
   ret void
 }
 

diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-multiple-blocks.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-multiple-blocks.ll
index f7cb89d64b9f..f70e105ecca8 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-multiple-blocks.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-multiple-blocks.ll
@@ -35,7 +35,7 @@ define void @test(<6 x double> * %A, <6 x double> * %B, <9 x double>* %C, i1 %co
 ; CHECK-NEXT:    [[TMP2:%.*]] = alloca <6 x double>, align 64
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <6 x double>* [[TMP2]] to i8*
 ; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <6 x double>* [[A]] to i8*
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 64 dereferenceable(48) [[TMP3]], i8* nonnull align 16 dereferenceable(48) [[TMP4]], i64 48, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 64 dereferenceable(48) [[TMP3]], i8* nonnull align 8 dereferenceable(48) [[TMP4]], i64 48, i1 false)
 ; CHECK-NEXT:    br label [[NO_ALIAS]]
 ; CHECK:       no_alias:
 ; CHECK-NEXT:    [[TMP5:%.*]] = phi <6 x double>* [ [[A]], [[ENTRY:%.*]] ], [ [[A]], [[ALIAS_CONT]] ], [ [[TMP2]], [[COPY]] ]
@@ -52,7 +52,7 @@ define void @test(<6 x double> * %A, <6 x double> * %B, <9 x double>* %C, i1 %co
 ; CHECK-NEXT:    [[TMP8:%.*]] = alloca <6 x double>, align 64
 ; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <6 x double>* [[TMP8]] to i8*
 ; CHECK-NEXT:    [[TMP10:%.*]] = bitcast <6 x double>* [[B]] to i8*
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 64 dereferenceable(48) [[TMP9]], i8* nonnull align 16 dereferenceable(48) [[TMP10]], i64 48, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 64 dereferenceable(48) [[TMP9]], i8* nonnull align 8 dereferenceable(48) [[TMP10]], i64 48, i1 false)
 ; CHECK-NEXT:    br label [[NO_ALIAS5]]
 ; CHECK:       no_alias3:
 ; CHECK-NEXT:    [[TMP11:%.*]] = phi <6 x double>* [ [[B]], [[NO_ALIAS]] ], [ [[B]], [[ALIAS_CONT3]] ], [ [[TMP8]], [[COPY4]] ]
@@ -172,7 +172,7 @@ define void @test(<6 x double> * %A, <6 x double> * %B, <9 x double>* %C, i1 %co
 ; CHECK-NEXT:    [[TMP38:%.*]] = alloca <6 x double>, align 64
 ; CHECK-NEXT:    [[TMP39:%.*]] = bitcast <6 x double>* [[TMP38]] to i8*
 ; CHECK-NEXT:    [[TMP40:%.*]] = bitcast <6 x double>* [[A]] to i8*
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 64 dereferenceable(48) [[TMP39]], i8* nonnull align 16 dereferenceable(48) [[TMP40]], i64 48, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 64 dereferenceable(48) [[TMP39]], i8* nonnull align 8 dereferenceable(48) [[TMP40]], i64 48, i1 false)
 ; CHECK-NEXT:    br label [[NO_ALIAS96]]
 ; CHECK:       no_alias93:
 ; CHECK-NEXT:    [[TMP41:%.*]] = phi <6 x double>* [ [[A]], [[END]] ], [ [[A]], [[ALIAS_CONT94]] ], [ [[TMP38]], [[COPY95]] ]
@@ -189,7 +189,7 @@ define void @test(<6 x double> * %A, <6 x double> * %B, <9 x double>* %C, i1 %co
 ; CHECK-NEXT:    [[TMP44:%.*]] = alloca <6 x double>, align 64
 ; CHECK-NEXT:    [[TMP45:%.*]] = bitcast <6 x double>* [[TMP44]] to i8*
 ; CHECK-NEXT:    [[TMP46:%.*]] = bitcast <6 x double>* [[B]] to i8*
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 64 dereferenceable(48) [[TMP45]], i8* nonnull align 16 dereferenceable(48) [[TMP46]], i64 48, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 64 dereferenceable(48) [[TMP45]], i8* nonnull align 8 dereferenceable(48) [[TMP46]], i64 48, i1 false)
 ; CHECK-NEXT:    br label [[NO_ALIAS103]]
 ; CHECK:       no_alias100:
 ; CHECK-NEXT:    [[TMP47:%.*]] = phi <6 x double>* [ [[B]], [[NO_ALIAS96]] ], [ [[B]], [[ALIAS_CONT101]] ], [ [[TMP44]], [[COPY102]] ]
@@ -275,10 +275,10 @@ define void @test(<6 x double> * %A, <6 x double> * %B, <9 x double>* %C, i1 %co
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %a = load <6 x double>, <6 x double>* %A, align 16
-  %b = load <6 x double>, <6 x double>* %B, align 16
+  %a = load <6 x double>, <6 x double>* %A, align 8
+  %b = load <6 x double>, <6 x double>* %B, align 8
   %c = call <9 x double> @llvm.matrix.multiply(<6 x double> %a, <6 x double> %b, i32 3, i32 2, i32 3)
-  store <9 x double> %c, <9 x double>* %C, align 16
+  store <9 x double> %c, <9 x double>* %C, align 8
 
   br i1 %cond, label %true, label %false
 
@@ -293,10 +293,10 @@ false:
   br label %end
 
 end:
-  %a.2 = load <6 x double>, <6 x double>* %A, align 16
-  %b.2 = load <6 x double>, <6 x double>* %B, align 16
+  %a.2 = load <6 x double>, <6 x double>* %A, align 8
+  %b.2 = load <6 x double>, <6 x double>* %B, align 8
   %c.2 = call <9 x double> @llvm.matrix.multiply(<6 x double> %a.2, <6 x double> %b.2, i32 3, i32 2, i32 3)
-  store <9 x double> %c.2, <9 x double>* %C, align 16
+  store <9 x double> %c.2, <9 x double>* %C, align 8
   ret void
 }
 

diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused.ll
index b734d13e922f..3ec7c4285e75 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused.ll
@@ -21,7 +21,7 @@ define void @multiply(<16 x double> * %A, <16 x double> * %B, <16 x double>* %C)
 ; CHECK-NEXT:    [[TMP2:%.*]] = alloca <16 x double>, align 128
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <16 x double>* [[TMP2]] to i8*
 ; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <16 x double>* [[A]] to i8*
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 128 dereferenceable(128) [[TMP3]], i8* nonnull align 16 dereferenceable(128) [[TMP4]], i64 128, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 128 dereferenceable(128) [[TMP3]], i8* nonnull align 8 dereferenceable(128) [[TMP4]], i64 128, i1 false)
 ; CHECK-NEXT:    br label [[NO_ALIAS]]
 ; CHECK:       no_alias:
 ; CHECK-NEXT:    [[TMP5:%.*]] = phi <16 x double>* [ [[A]], [[ENTRY:%.*]] ], [ [[A]], [[ALIAS_CONT]] ], [ [[TMP2]], [[COPY]] ]
@@ -38,7 +38,7 @@ define void @multiply(<16 x double> * %A, <16 x double> * %B, <16 x double>* %C)
 ; CHECK-NEXT:    [[TMP8:%.*]] = alloca <16 x double>, align 128
 ; CHECK-NEXT:    [[TMP9:%.*]] = bitcast <16 x double>* [[TMP8]] to i8*
 ; CHECK-NEXT:    [[TMP10:%.*]] = bitcast <16 x double>* [[B]] to i8*
-; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 128 dereferenceable(128) [[TMP9]], i8* nonnull align 16 dereferenceable(128) [[TMP10]], i64 128, i1 false)
+; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 128 dereferenceable(128) [[TMP9]], i8* nonnull align 8 dereferenceable(128) [[TMP10]], i64 128, i1 false)
 ; CHECK-NEXT:    br label [[NO_ALIAS5]]
 
 ; CHECK:       no_alias3:
@@ -261,12 +261,12 @@ define void @multiply(<16 x double> * %A, <16 x double> * %B, <16 x double>* %C)
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %a = load <16 x double>, <16 x double>* %A, align 16
-  %b = load <16 x double>, <16 x double>* %B, align 16
+  %a = load <16 x double>, <16 x double>* %A, align 8
+  %b = load <16 x double>, <16 x double>* %B, align 8
 
   %c = call <16 x double> @llvm.matrix.multiply(<16 x double> %a, <16 x double> %b, i32 4, i32 4, i32 4)
 
-  store <16 x double> %c, <16 x double>* %C, align 16
+  store <16 x double> %c, <16 x double>* %C, align 8
   ret void
 }
 

diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/propagate-backward.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/propagate-backward.ll
index 22e7b321df7b..88b31efc0367 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/propagate-backward.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/propagate-backward.ll
@@ -87,7 +87,7 @@ define <8 x double> @load_fadd_transpose(<8 x double>* %A.Ptr, <8 x double> %b)
 
 
 entry:
-  %a = load <8 x double>, <8 x double>* %A.Ptr
+  %a = load <8 x double>, <8 x double>* %A.Ptr, align 8
   %add = fadd <8 x double> %a, %b
   %c  = call <8 x double> @llvm.matrix.transpose(<8 x double> %add, i32 2, i32 4)
   ret <8 x double> %c

diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/propagate-multiple-iterations.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/propagate-multiple-iterations.ll
index 3f342cc00e3b..673955ad8251 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/propagate-multiple-iterations.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/propagate-multiple-iterations.ll
@@ -73,9 +73,9 @@ define <16 x double> @backpropagation_iterations(<16 x double>* %A.Ptr, <16 x do
 ; CHECK-NEXT:    [[TMP41:%.*]] = shufflevector <8 x double> [[TMP39]], <8 x double> [[TMP40]], <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
 ; CHECK-NEXT:    ret <16 x double> [[TMP41]]
 ;
-  %A = load <16 x double>, <16 x double>* %A.Ptr
+  %A = load <16 x double>, <16 x double>* %A.Ptr, align 8
   %A.trans = tail call <16 x double> @llvm.matrix.transpose.v16f64(<16 x double> %A, i32 4, i32 4)
-  %B = load <16 x double>, <16 x double>* %B.Ptr
+  %B = load <16 x double>, <16 x double>* %B.Ptr, align 8
   %Mul = fmul <16 x double> %A, %B
   ret <16 x double> %Mul
 }


        


More information about the llvm-commits mailing list