[llvm] 9ce89b3 - [Matrix] Add volatile load/store tests (NFC).

Florian Hahn via llvm-commits <llvm-commits at lists.llvm.org>
Thu Jun 18 02:00:01 PDT 2020


Author: Florian Hahn
Date: 2020-06-18T09:57:13+01:00
New Revision: 9ce89b3b6459f348d0db55195152b9d0935dba94

URL: https://github.com/llvm/llvm-project/commit/9ce89b3b6459f348d0db55195152b9d0935dba94
DIFF: https://github.com/llvm/llvm-project/commit/9ce89b3b6459f348d0db55195152b9d0935dba94.diff

LOG: [Matrix] Add volatile load/store tests (NFC).
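
A note on the intrinsic operands exercised here (a minimal sketch, mirroring
the declares and calls in the tests below; per the LangRef, the i1 operand
after the stride is the volatile flag):

    ; operands: pointer, stride (in elements), volatile flag, rows, columns
    %l = call <9 x double> @llvm.matrix.column.major.load.v9f64(
             <9 x double>* %in, i64 %stride, i1 true, i32 3, i32 3)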

Added: 
    llvm/test/Transforms/LowerMatrixIntrinsics/load-align-volatile.ll
    llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-volatile.ll
    llvm/test/Transforms/LowerMatrixIntrinsics/store-align-volatile.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/load-align-volatile.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/load-align-volatile.ll
new file mode 100644
index 000000000000..98fbdf300dcb
--- /dev/null
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/load-align-volatile.ll
@@ -0,0 +1,131 @@
+; RUN: opt -lower-matrix-intrinsics -S < %s | FileCheck %s
+; RUN: opt -passes='lower-matrix-intrinsics' -S < %s | FileCheck %s
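+
+; These tests exercise volatile and alignment variants of the matrix load
+; lowering. The CHECK lines record what the lowering currently emits for
+; each case; the CHECK-NOT lines guard against extra loads.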
+
+define <9 x double> @strided_load_3x3_volatile(<9 x double>* %in, i64 %stride) {
+; CHECK-LABEL: @strided_load_3x3_volatile(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <9 x double>* [[IN:%.*]] to double*
+; CHECK-NEXT:    [[VEC_START:%.*]] = mul i64 0, [[STRIDE:%.*]]
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP0]], i64 [[VEC_START]]
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast double* [[VEC_GEP]] to <3 x double>*
+; CHECK-NEXT:    load <3 x double>, <3 x double>* [[VEC_CAST]], align 8
+; CHECK-NEXT:    [[VEC_START1:%.*]] = mul i64 1, [[STRIDE]]
+; CHECK-NEXT:    [[VEC_GEP2:%.*]] = getelementptr double, double* [[TMP0]], i64 [[VEC_START1]]
+; CHECK-NEXT:    [[VEC_CAST3:%.*]] = bitcast double* [[VEC_GEP2]] to <3 x double>*
+; CHECK-NEXT:    load <3 x double>, <3 x double>* [[VEC_CAST3]], align 8
+; CHECK-NEXT:    [[VEC_START5:%.*]] = mul i64 2, [[STRIDE]]
+; CHECK-NEXT:    [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP0]], i64 [[VEC_START5]]
+; CHECK-NEXT:    [[VEC_CAST7:%.*]] = bitcast double* [[VEC_GEP6]] to <3 x double>*
+; CHECK-NEXT:    load <3 x double>, <3 x double>* [[VEC_CAST7]], align 8
+; CHECK-NOT:     = load
+;
+entry:
+  %load = call <9 x double> @llvm.matrix.column.major.load.v9f64(<9 x double>* %in, i64 %stride, i1 true, i32 3, i32 3)
+  ret <9 x double> %load
+}
+
+declare <9 x double> @llvm.matrix.column.major.load.v9f64(<9 x double>*, i64, i1, i32, i32)
+
+define <4 x double> @load_volatile_multiply(<4 x double>* %in) {
+; CHECK-LABEL: @load_volatile_multiply(
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x double>* [[IN:%.*]] to double*
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast double* [[TMP1]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST]], align 8
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP1]], i64 2
+; CHECK-NEXT:    [[VEC_CAST1:%.*]] = bitcast double* [[VEC_GEP]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST1]], align 8
+; CHECK-NOT:     = load
+;
+  %in.m = load volatile <4 x double>, <4 x double>* %in, align 8
+  %res = call <4 x double> @llvm.matrix.multiply(<4 x double> %in.m, <4 x double> %in.m, i32 2, i32 2, i32 2)
+  ret <4 x double> %res
+}
+
+declare <4 x double> @llvm.matrix.multiply(<4 x double>, <4 x double>, i32, i32, i32)
+
+
+define <9 x double> @strided_load_3x3_align32(<9 x double>* %in, i64 %stride) {
+; CHECK-LABEL: @strided_load_3x3_align32(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <9 x double>* [[IN:%.*]] to double*
+; CHECK-NEXT:    [[VEC_START:%.*]] = mul i64 0, [[STRIDE:%.*]]
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP0]], i64 [[VEC_START]]
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast double* [[VEC_GEP]] to <3 x double>*
+; CHECK-NEXT:    load <3 x double>, <3 x double>* [[VEC_CAST]], align 8
+; CHECK-NEXT:    [[VEC_START1:%.*]] = mul i64 1, [[STRIDE]]
+; CHECK-NEXT:    [[VEC_GEP2:%.*]] = getelementptr double, double* [[TMP0]], i64 [[VEC_START1]]
+; CHECK-NEXT:    [[VEC_CAST3:%.*]] = bitcast double* [[VEC_GEP2]] to <3 x double>*
+; CHECK-NEXT:    load <3 x double>, <3 x double>* [[VEC_CAST3]], align 8
+; CHECK-NEXT:    [[VEC_START5:%.*]] = mul i64 2, [[STRIDE]]
+; CHECK-NEXT:    [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP0]], i64 [[VEC_START5]]
+; CHECK-NEXT:    [[VEC_CAST7:%.*]] = bitcast double* [[VEC_GEP6]] to <3 x double>*
+; CHECK-NEXT:    load <3 x double>, <3 x double>* [[VEC_CAST7]], align 8
+; CHECK-NOT:     = load
+;
+entry:
+  %load = call <9 x double> @llvm.matrix.column.major.load.v9f64(<9 x double>* align 32 %in, i64 %stride, i1 false, i32 3, i32 3)
+  ret <9 x double> %load
+}
+
+define <9 x double> @strided_load_3x3_align2(<9 x double>* %in, i64 %stride) {
+; CHECK-LABEL: @strided_load_3x3_align2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <9 x double>* [[IN:%.*]] to double*
+; CHECK-NEXT:    [[VEC_START:%.*]] = mul i64 0, [[STRIDE:%.*]]
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP0]], i64 [[VEC_START]]
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast double* [[VEC_GEP]] to <3 x double>*
+; CHECK-NEXT:    load <3 x double>, <3 x double>* [[VEC_CAST]], align 8
+; CHECK-NEXT:    [[VEC_START1:%.*]] = mul i64 1, [[STRIDE]]
+; CHECK-NEXT:    [[VEC_GEP2:%.*]] = getelementptr double, double* [[TMP0]], i64 [[VEC_START1]]
+; CHECK-NEXT:    [[VEC_CAST3:%.*]] = bitcast double* [[VEC_GEP2]] to <3 x double>*
+; CHECK-NEXT:    load <3 x double>, <3 x double>* [[VEC_CAST3]], align 8
+; CHECK-NEXT:    [[VEC_START5:%.*]] = mul i64 2, [[STRIDE]]
+; CHECK-NEXT:    [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP0]], i64 [[VEC_START5]]
+; CHECK-NEXT:    [[VEC_CAST7:%.*]] = bitcast double* [[VEC_GEP6]] to <3 x double>*
+; CHECK-NEXT:    load <3 x double>, <3 x double>* [[VEC_CAST7]], align 8
+; CHECK-NOT:     = load
+;
+entry:
+  %load = call <9 x double> @llvm.matrix.column.major.load.v9f64(<9 x double>* align 2 %in, i64 %stride, i1 false, i32 3, i32 3)
+  ret <9 x double> %load
+}
+
+
+define <4 x double> @load_align2_multiply(<4 x double>* %in) {
+; CHECK-LABEL: @load_align2_multiply(
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x double>* [[IN:%.*]] to double*
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast double* [[TMP1]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST]], align 8
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP1]], i64 2
+; CHECK-NEXT:    [[VEC_CAST1:%.*]] = bitcast double* [[VEC_GEP]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST1]], align 8
+; CHECK-NOT:     = load
+;
+  %in.m = load <4 x double>, <4 x double>* %in, align 2
+  %res = call <4 x double> @llvm.matrix.multiply(<4 x double> %in.m, <4 x double> %in.m, i32 2, i32 2, i32 2)
+  ret <4 x double> %res
+}
+
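+; 2x3 strided load: the three 2-element columns are loaded separately and
+; recombined into a single <6 x float> via shufflevector, as checked below.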
+define <6 x float> @strided_load_2x3_align16_stride2(<6 x float>* %in) {
+; CHECK-LABEL: @strided_load_2x3_align16_stride2(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <6 x float>* [[IN:%.*]] to float*
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast float* [[TMP0]] to <2 x float>*
+; CHECK-NEXT:    [[COL_LOAD:%.*]] = load <2 x float>, <2 x float>* [[VEC_CAST]], align 4
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr float, float* [[TMP0]], i64 2
+; CHECK-NEXT:    [[VEC_CAST1:%.*]] = bitcast float* [[VEC_GEP]] to <2 x float>*
+; CHECK-NEXT:    [[COL_LOAD2:%.*]] = load <2 x float>, <2 x float>* [[VEC_CAST1]], align 4
+; CHECK-NEXT:    [[VEC_GEP3:%.*]] = getelementptr float, float* [[TMP0]], i64 4
+; CHECK-NEXT:    [[VEC_CAST4:%.*]] = bitcast float* [[VEC_GEP3]] to <2 x float>*
+; CHECK-NEXT:    [[COL_LOAD5:%.*]] = load <2 x float>, <2 x float>* [[VEC_CAST4]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x float> [[COL_LOAD]], <2 x float> [[COL_LOAD2]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x float> [[COL_LOAD5]], <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> [[TMP2]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
+; CHECK-NEXT:    ret <6 x float> [[TMP3]]
+;
+entry:
+  %load = call <6 x float> @llvm.matrix.column.major.load.v6f32(<6 x float>* align 16 %in, i64 2, i1 false, i32 2, i32 3)
+  ret <6 x float> %load
+}
+
+declare <6 x float> @llvm.matrix.column.major.load.v6f32(<6 x float>*, i64, i1, i32, i32)

diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-volatile.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-volatile.ll
new file mode 100644
index 000000000000..ea7051256a90
--- /dev/null
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-volatile.ll
@@ -0,0 +1,185 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -lower-matrix-intrinsics -fuse-matrix-tile-size=2 -matrix-allow-contract -force-fuse-matrix -verify-dom-info %s -S | FileCheck %s
+
+; REQUIRES: aarch64-registered-target
+
+target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "aarch64-apple-ios"
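+
+; With -force-fuse-matrix and a tile size of 2, the 2x2 multiplies below are
+; lowered to tiled <2 x double> loads and stores; the CHECK lines record the
+; loads and stores the lowering currently emits.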
+
+define void @multiply_all_volatile(<4 x double>* noalias %A, <4 x double>* noalias %B, <4 x double>* noalias %C) {
+; CHECK-LABEL: @multiply_all_volatile(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x double>* [[A:%.*]] to double*
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr double, double* [[TMP0]], i64 0
+; CHECK-NEXT:    [[COL_CAST:%.*]] = bitcast double* [[TMP1]] to <4 x double>*
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x double>* [[COL_CAST]] to double*
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast double* [[TMP2]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST]], align 8
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP2]], i64 2
+; CHECK-NEXT:    [[VEC_CAST1:%.*]] = bitcast double* [[VEC_GEP]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x double>* [[B:%.*]] to double*
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr double, double* [[TMP3]], i64 0
+; CHECK-NEXT:    [[COL_CAST3:%.*]] = bitcast double* [[TMP4]] to <4 x double>*
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x double>* [[COL_CAST3]] to double*
+; CHECK-NEXT:    [[VEC_CAST4:%.*]] = bitcast double* [[TMP5]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST4]], align 8
+; CHECK-NEXT:    [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP5]], i64 2
+; CHECK-NEXT:    [[VEC_CAST7:%.*]] = bitcast double* [[VEC_GEP6]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST7]], align 8
+
+; CHECK:         [[TMP18:%.*]] = bitcast <4 x double>* [[C:%.*]] to double*
+; CHECK-NEXT:    [[TMP19:%.*]] = getelementptr double, double* [[TMP18]], i64 0
+; CHECK-NEXT:    [[COL_CAST18:%.*]] = bitcast double* [[TMP19]] to <4 x double>*
+; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <4 x double>* [[COL_CAST18]] to double*
+; CHECK-NEXT:    [[VEC_CAST19:%.*]] = bitcast double* [[TMP20]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> {{.*}}, <2 x double>* [[VEC_CAST19]], align 8
+; CHECK-NEXT:    [[VEC_GEP20:%.*]] = getelementptr double, double* [[TMP20]], i64 2
+; CHECK-NEXT:    [[VEC_CAST21:%.*]] = bitcast double* [[VEC_GEP20]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> {{.*}}, <2 x double>* [[VEC_CAST21]], align 8
+; CHECK-NEXT:    ret void
+;
+
+entry:
+  %a = load volatile <4 x double>, <4 x double>* %A, align 8
+  %b = load volatile <4 x double>, <4 x double>* %B, align 8
+
+  %c = call <4 x double> @llvm.matrix.multiply(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
+
+  store volatile <4 x double> %c, <4 x double>* %C, align 8
+  ret void
+}
+
+
+define void @multiply_load0_volatile(<4 x double>* noalias %A, <4 x double>* noalias %B, <4 x double>* noalias %C) {
+; CHECK-LABEL: @multiply_load0_volatile(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x double>* [[A:%.*]] to double*
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr double, double* [[TMP0]], i64 0
+; CHECK-NEXT:    [[COL_CAST:%.*]] = bitcast double* [[TMP1]] to <4 x double>*
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x double>* [[COL_CAST]] to double*
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast double* [[TMP2]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST]], align 8
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP2]], i64 2
+; CHECK-NEXT:    [[VEC_CAST1:%.*]] = bitcast double* [[VEC_GEP]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x double>* [[B:%.*]] to double*
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr double, double* [[TMP3]], i64 0
+; CHECK-NEXT:    [[COL_CAST3:%.*]] = bitcast double* [[TMP4]] to <4 x double>*
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x double>* [[COL_CAST3]] to double*
+; CHECK-NEXT:    [[VEC_CAST4:%.*]] = bitcast double* [[TMP5]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST4]], align 8
+; CHECK-NEXT:    [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP5]], i64 2
+; CHECK-NEXT:    [[VEC_CAST7:%.*]] = bitcast double* [[VEC_GEP6]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST7]], align 8
+
+; CHECK:         [[TMP18:%.*]] = bitcast <4 x double>* [[C:%.*]] to double*
+; CHECK-NEXT:    [[TMP19:%.*]] = getelementptr double, double* [[TMP18]], i64 0
+; CHECK-NEXT:    [[COL_CAST18:%.*]] = bitcast double* [[TMP19]] to <4 x double>*
+; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <4 x double>* [[COL_CAST18]] to double*
+; CHECK-NEXT:    [[VEC_CAST19:%.*]] = bitcast double* [[TMP20]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> {{.*}}, <2 x double>* [[VEC_CAST19]], align 8
+; CHECK-NEXT:    [[VEC_GEP20:%.*]] = getelementptr double, double* [[TMP20]], i64 2
+; CHECK-NEXT:    [[VEC_CAST21:%.*]] = bitcast double* [[VEC_GEP20]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> {{.*}}, <2 x double>* [[VEC_CAST21]], align 8
+; CHECK-NEXT:    ret void
+;
+
+entry:
+  %a = load volatile <4 x double>, <4 x double>* %A, align 8
+  %b = load <4 x double>, <4 x double>* %B, align 8
+
+  %c = call <4 x double> @llvm.matrix.multiply(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
+
+  store <4 x double> %c, <4 x double>* %C, align 8
+  ret void
+}
+
+define void @multiply_load1_volatile(<4 x double>* noalias %A, <4 x double>* noalias %B, <4 x double>* noalias %C) {
+; CHECK-LABEL: @multiply_load1_volatile(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x double>* [[A:%.*]] to double*
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr double, double* [[TMP0]], i64 0
+; CHECK-NEXT:    [[COL_CAST:%.*]] = bitcast double* [[TMP1]] to <4 x double>*
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x double>* [[COL_CAST]] to double*
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast double* [[TMP2]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST]], align 8
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP2]], i64 2
+; CHECK-NEXT:    [[VEC_CAST1:%.*]] = bitcast double* [[VEC_GEP]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x double>* [[B:%.*]] to double*
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr double, double* [[TMP3]], i64 0
+; CHECK-NEXT:    [[COL_CAST3:%.*]] = bitcast double* [[TMP4]] to <4 x double>*
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x double>* [[COL_CAST3]] to double*
+; CHECK-NEXT:    [[VEC_CAST4:%.*]] = bitcast double* [[TMP5]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST4]], align 8
+; CHECK-NEXT:    [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP5]], i64 2
+; CHECK-NEXT:    [[VEC_CAST7:%.*]] = bitcast double* [[VEC_GEP6]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST7]], align 8
+
+; CHECK:         [[TMP18:%.*]] = bitcast <4 x double>* [[C:%.*]] to double*
+; CHECK-NEXT:    [[TMP19:%.*]] = getelementptr double, double* [[TMP18]], i64 0
+; CHECK-NEXT:    [[COL_CAST18:%.*]] = bitcast double* [[TMP19]] to <4 x double>*
+; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <4 x double>* [[COL_CAST18]] to double*
+; CHECK-NEXT:    [[VEC_CAST19:%.*]] = bitcast double* [[TMP20]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> {{.*}}, <2 x double>* [[VEC_CAST19]], align 8
+; CHECK-NEXT:    [[VEC_GEP20:%.*]] = getelementptr double, double* [[TMP20]], i64 2
+; CHECK-NEXT:    [[VEC_CAST21:%.*]] = bitcast double* [[VEC_GEP20]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> {{.*}}, <2 x double>* [[VEC_CAST21]], align 8
+; CHECK-NEXT:    ret void
+;
+
+entry:
+  %a = load <4 x double>, <4 x double>* %A, align 8
+  %b = load volatile <4 x double>, <4 x double>* %B, align 8
+
+  %c = call <4 x double> @llvm.matrix.multiply(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
+
+  store <4 x double> %c, <4 x double>* %C, align 8
+  ret void
+}
+
+define void @multiply_store_volatile(<4 x double>* noalias %A, <4 x double>* noalias %B, <4 x double>* noalias %C) {
+; CHECK-LABEL: @multiply_store_volatile(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x double>* [[A:%.*]] to double*
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr double, double* [[TMP0]], i64 0
+; CHECK-NEXT:    [[COL_CAST:%.*]] = bitcast double* [[TMP1]] to <4 x double>*
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x double>* [[COL_CAST]] to double*
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast double* [[TMP2]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST]], align 8
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP2]], i64 2
+; CHECK-NEXT:    [[VEC_CAST1:%.*]] = bitcast double* [[VEC_GEP]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST1]], align 8
+; CHECK-NEXT:    [[TMP3:%.*]] = bitcast <4 x double>* [[B:%.*]] to double*
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr double, double* [[TMP3]], i64 0
+; CHECK-NEXT:    [[COL_CAST3:%.*]] = bitcast double* [[TMP4]] to <4 x double>*
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x double>* [[COL_CAST3]] to double*
+; CHECK-NEXT:    [[VEC_CAST4:%.*]] = bitcast double* [[TMP5]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST4]], align 8
+; CHECK-NEXT:    [[VEC_GEP6:%.*]] = getelementptr double, double* [[TMP5]], i64 2
+; CHECK-NEXT:    [[VEC_CAST7:%.*]] = bitcast double* [[VEC_GEP6]] to <2 x double>*
+; CHECK-NEXT:    load <2 x double>, <2 x double>* [[VEC_CAST7]], align 8
+
+; CHECK:         [[TMP18:%.*]] = bitcast <4 x double>* [[C:%.*]] to double*
+; CHECK-NEXT:    [[TMP19:%.*]] = getelementptr double, double* [[TMP18]], i64 0
+; CHECK-NEXT:    [[COL_CAST18:%.*]] = bitcast double* [[TMP19]] to <4 x double>*
+; CHECK-NEXT:    [[TMP20:%.*]] = bitcast <4 x double>* [[COL_CAST18]] to double*
+; CHECK-NEXT:    [[VEC_CAST19:%.*]] = bitcast double* [[TMP20]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> {{.*}}, <2 x double>* [[VEC_CAST19]], align 8
+; CHECK-NEXT:    [[VEC_GEP20:%.*]] = getelementptr double, double* [[TMP20]], i64 2
+; CHECK-NEXT:    [[VEC_CAST21:%.*]] = bitcast double* [[VEC_GEP20]] to <2 x double>*
+; CHECK-NEXT:    store <2 x double> {{.*}}, <2 x double>* [[VEC_CAST21]], align 8
+; CHECK-NEXT:    ret void
+;
+entry:
+  %a = load <4 x double>, <4 x double>* %A, align 8
+  %b = load <4 x double>, <4 x double>* %B, align 8
+
+  %c = call <4 x double> @llvm.matrix.multiply(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
+
+  store volatile <4 x double> %c, <4 x double>* %C, align 8
+  ret void
+}
+
+declare <4 x double> @llvm.matrix.multiply(<4 x double>, <4 x double>, i32, i32, i32)

diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/store-align-volatile.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/store-align-volatile.ll
new file mode 100644
index 000000000000..38b6ee3df3f7
--- /dev/null
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/store-align-volatile.ll
@@ -0,0 +1,107 @@
+; RUN: opt -lower-matrix-intrinsics -S < %s | FileCheck %s
+; RUN: opt -passes='lower-matrix-intrinsics' -S < %s | FileCheck %s
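+
+; Volatile and alignment variants of the matrix store lowering. The CHECK
+; lines record the stores the lowering currently emits for each case.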
+
+define void @strided_store_volatile(<6 x i32> %in, i32* %out) {
+; CHECK-LABEL: @strided_store_volatile(
+; CHECK-NEXT:    [[SPLIT:%.*]] = shufflevector <6 x i32> [[IN:%.*]], <6 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+; CHECK-NEXT:    [[SPLIT1:%.*]] = shufflevector <6 x i32> [[IN]], <6 x i32> undef, <3 x i32> <i32 3, i32 4, i32 5>
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast i32* [[OUT:%.*]] to <3 x i32>*
+; CHECK-NEXT:    store <3 x i32> [[SPLIT]], <3 x i32>* [[VEC_CAST]], align 4
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr i32, i32* [[OUT]], i64 5
+; CHECK-NEXT:    [[VEC_CAST2:%.*]] = bitcast i32* [[VEC_GEP]] to <3 x i32>*
+; CHECK-NEXT:    store <3 x i32> [[SPLIT1]], <3 x i32>* [[VEC_CAST2]], align 4
+; CHECK-NEXT:    ret void
+;
+  call void @llvm.matrix.column.major.store(<6 x i32> %in, i32* %out, i64 5, i1 true, i32 3, i32 2)
+  ret void
+}
+
+declare void @llvm.matrix.column.major.store(<6 x i32>, i32*, i64, i1, i32, i32)
+
+
+define void @multiply_store_volatile(<4 x i32> %in, <4 x i32>* %out) {
+; CHECK-LABEL: @multiply_store_volatile(
+; CHECK:         [[TMP29:%.*]] = bitcast <4 x i32>* %out to i32*
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast i32* [[TMP29]] to <2 x i32>*
+; CHECK-NEXT:    store <2 x i32> {{.*}}, <2 x i32>* [[VEC_CAST]], align 4
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr i32, i32* [[TMP29]], i64 2
+; CHECK-NEXT:    [[VEC_CAST25:%.*]] = bitcast i32* [[VEC_GEP]] to <2 x i32>*
+; CHECK-NEXT:    store <2 x i32> {{.*}}, <2 x i32>* [[VEC_CAST25]], align 4
+; CHECK-NEXT:    ret void
+;
+  %res = call <4 x i32> @llvm.matrix.multiply(<4 x i32> %in, <4 x i32> %in, i32 2, i32 2, i32 2)
+  store volatile <4 x i32> %res, <4 x i32>* %out, align 4
+  ret void
+}
+
+declare <4 x i32> @llvm.matrix.multiply(<4 x i32>, <4 x i32>, i32, i32, i32)
+
+define void @strided_store_align32(<6 x i32> %in, i64 %stride, i32* %out) {
+; CHECK-LABEL: @strided_store_align32(
+; CHECK-NEXT:    [[SPLIT:%.*]] = shufflevector <6 x i32> [[IN:%.*]], <6 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+; CHECK-NEXT:    [[SPLIT1:%.*]] = shufflevector <6 x i32> [[IN]], <6 x i32> undef, <3 x i32> <i32 3, i32 4, i32 5>
+; CHECK-NEXT:    [[VEC_START:%.*]] = mul i64 0, [[STRIDE:%.*]]
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr i32, i32* [[OUT:%.*]], i64 [[VEC_START]]
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast i32* [[VEC_GEP]] to <3 x i32>*
+; CHECK-NEXT:    store <3 x i32> [[SPLIT]], <3 x i32>* [[VEC_CAST]], align 4
+; CHECK-NEXT:    [[VEC_START2:%.*]] = mul i64 1, [[STRIDE]]
+; CHECK-NEXT:    [[VEC_GEP3:%.*]] = getelementptr i32, i32* [[OUT]], i64 [[VEC_START2]]
+; CHECK-NEXT:    [[VEC_CAST4:%.*]] = bitcast i32* [[VEC_GEP3]] to <3 x i32>*
+; CHECK-NEXT:    store <3 x i32> [[SPLIT1]], <3 x i32>* [[VEC_CAST4]], align 4
+; CHECK-NEXT:    ret void
+;
+  call void @llvm.matrix.column.major.store(<6 x i32> %in, i32* align 32 %out, i64 %stride, i1 true, i32 3, i32 2)
+  ret void
+}
+
+define void @strided_store_align2(<6 x i32> %in, i64 %stride, i32* %out) {
+; CHECK-LABEL: @strided_store_align2(
+; CHECK-NEXT:    [[SPLIT:%.*]] = shufflevector <6 x i32> [[IN:%.*]], <6 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+; CHECK-NEXT:    [[SPLIT1:%.*]] = shufflevector <6 x i32> [[IN]], <6 x i32> undef, <3 x i32> <i32 3, i32 4, i32 5>
+; CHECK-NEXT:    [[VEC_START:%.*]] = mul i64 0, [[STRIDE:%.*]]
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr i32, i32* [[OUT:%.*]], i64 [[VEC_START]]
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast i32* [[VEC_GEP]] to <3 x i32>*
+; CHECK-NEXT:    store <3 x i32> [[SPLIT]], <3 x i32>* [[VEC_CAST]], align 4
+; CHECK-NEXT:    [[VEC_START2:%.*]] = mul i64 1, [[STRIDE]]
+; CHECK-NEXT:    [[VEC_GEP3:%.*]] = getelementptr i32, i32* [[OUT]], i64 [[VEC_START2]]
+; CHECK-NEXT:    [[VEC_CAST4:%.*]] = bitcast i32* [[VEC_GEP3]] to <3 x i32>*
+; CHECK-NEXT:    store <3 x i32> [[SPLIT1]], <3 x i32>* [[VEC_CAST4]], align 4
+; CHECK-NEXT:    ret void
+;
+  call void @llvm.matrix.column.major.store(<6 x i32> %in, i32* align 2 %out, i64 %stride, i1 true, i32 3, i32 2)
+  ret void
+}
+
+define void @multiply_store_align16_stride8(<4 x i32> %in, <4 x i32>* %out) {
+; CHECK-LABEL: @multiply_store_align16_stride8(
+; CHECK:         [[TMP29:%.*]] = bitcast <4 x i32>* %out to i32*
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast i32* [[TMP29]] to <2 x i32>*
+; CHECK-NEXT:    store <2 x i32> {{.*}}, <2 x i32>* [[VEC_CAST]], align 4
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr i32, i32* [[TMP29]], i64 2
+; CHECK-NEXT:    [[VEC_CAST25:%.*]] = bitcast i32* [[VEC_GEP]] to <2 x i32>*
+; CHECK-NEXT:    store <2 x i32> {{.*}}, <2 x i32>* [[VEC_CAST25]], align 4
+; CHECK-NEXT:    ret void
+;
+  %res = call <4 x i32> @llvm.matrix.multiply(<4 x i32> %in, <4 x i32> %in, i32 2, i32 2, i32 2)
+  store <4 x i32> %res, <4 x i32>* %out, align 16
+  ret void
+}
+
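+; 2x3 store with stride 3: each <2 x i32> column is stored at element
+; offsets 0, 3 and 6 from the base pointer, as checked below.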
+define void @strided_store_align8_stride12(<6 x i32> %in, i32* %out) {
+; CHECK-LABEL: @strided_store_align8_stride12(
+; CHECK-NEXT:    [[SPLIT:%.*]] = shufflevector <6 x i32> [[IN:%.*]], <6 x i32> undef, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT:    [[SPLIT1:%.*]] = shufflevector <6 x i32> [[IN]], <6 x i32> undef, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT:    [[SPLIT2:%.*]] = shufflevector <6 x i32> [[IN]], <6 x i32> undef, <2 x i32> <i32 4, i32 5>
+; CHECK-NEXT:    [[VEC_CAST:%.*]] = bitcast i32* [[OUT:%.*]] to <2 x i32>*
+; CHECK-NEXT:    store <2 x i32> [[SPLIT]], <2 x i32>* [[VEC_CAST]], align 4
+; CHECK-NEXT:    [[VEC_GEP:%.*]] = getelementptr i32, i32* [[OUT]], i64 3
+; CHECK-NEXT:    [[VEC_CAST3:%.*]] = bitcast i32* [[VEC_GEP]] to <2 x i32>*
+; CHECK-NEXT:    store <2 x i32> [[SPLIT1]], <2 x i32>* [[VEC_CAST3]], align 4
+; CHECK-NEXT:    [[VEC_GEP4:%.*]] = getelementptr i32, i32* [[OUT]], i64 6
+; CHECK-NEXT:    [[VEC_CAST5:%.*]] = bitcast i32* [[VEC_GEP4]] to <2 x i32>*
+; CHECK-NEXT:    store <2 x i32> [[SPLIT2]], <2 x i32>* [[VEC_CAST5]], align 4
+; CHECK-NEXT:    ret void
+;
+  call void @llvm.matrix.column.major.store(<6 x i32> %in, i32* align 8 %out, i64 3, i1 false, i32 2, i32 3)
+  ret void
+}
