[Mlir-commits] [mlir] f5ed22f - [mlir][VectorToSCF] 128 byte alignment of alloc ops
Jakub Lichman
llvmlistbot at llvm.org
Wed Sep 2 05:38:29 PDT 2020
Author: Jakub Lichman
Date: 2020-09-02T12:37:35Z
New Revision: f5ed22f09dd95c879b57a11c42d2fa7f5ef5e72d
URL: https://github.com/llvm/llvm-project/commit/f5ed22f09dd95c879b57a11c42d2fa7f5ef5e72d
DIFF: https://github.com/llvm/llvm-project/commit/f5ed22f09dd95c879b57a11c42d2fa7f5ef5e72d.diff
LOG: [mlir][VectorToSCF] 128 byte alignment of alloc ops
Added 128-byte alignment to the alloc ops created in the VectorToSCF pass.
128-byte alignment had already been introduced to this pass, but not to all
alloc ops; this commit adds the same alignment to the remaining ones.
Specifying an explicit alignment guards against memory alignment errors
on weakly tested architectures.
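For illustration, this is the effect on the IR emitted by the pass (a sketch
based on the updated test expectations below; the memref shape is just an
example taken from the tests):

    // Before: temporary buffer allocated with the default alignment.
    %tmp = alloc() : memref<5x4x3xf32>

    // After: the same allocation now requests 128-byte alignment.
    %tmp = alloc() {alignment = 128 : i64} : memref<5x4x3xf32>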
Differential Revision: https://reviews.llvm.org/D86454
Added:
Modified:
mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
Removed:
################################################################################
diff --git a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
index 0f428f887d12..267aea90cc9d 100644
--- a/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
+++ b/mlir/lib/Conversion/VectorToSCF/VectorToSCF.cpp
@@ -35,6 +35,8 @@
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/Passes.h"
+#define ALIGNMENT_SIZE 128
+
using namespace mlir;
using namespace mlir::edsc;
using namespace mlir::edsc::intrinsics;
@@ -232,8 +234,8 @@ static Value setAllocAtFunctionEntry(MemRefType memRefMinorVectorType,
op->getParentWithTrait<OpTrait::AutomaticAllocationScope>();
assert(scope && "Expected op to be inside automatic allocation scope");
b.setInsertionPointToStart(&scope->getRegion(0).front());
- Value res =
- std_alloca(memRefMinorVectorType, ValueRange{}, b.getI64IntegerAttr(128));
+ Value res = std_alloca(memRefMinorVectorType, ValueRange{},
+ b.getI64IntegerAttr(ALIGNMENT_SIZE));
return res;
}
@@ -575,7 +577,8 @@ LogicalResult VectorTransferRewriter<TransferReadOp>::matchAndRewrite(
steps.push_back(std_constant_index(step));
// 2. Emit alloc-copy-load-dealloc.
- Value tmp = std_alloc(tmpMemRefType(transfer));
+ Value tmp = std_alloc(tmpMemRefType(transfer), ValueRange{},
+ rewriter.getI64IntegerAttr(ALIGNMENT_SIZE));
StdIndexedValue local(tmp);
Value vec = vector_type_cast(tmp);
loopNestBuilder(lbs, ubs, steps, [&](ValueRange loopIvs) {
@@ -648,7 +651,8 @@ LogicalResult VectorTransferRewriter<TransferWriteOp>::matchAndRewrite(
steps.push_back(std_constant_index(step));
// 2. Emit alloc-store-copy-dealloc.
- Value tmp = std_alloc(tmpMemRefType(transfer));
+ Value tmp = std_alloc(tmpMemRefType(transfer), ValueRange{},
+ rewriter.getI64IntegerAttr(ALIGNMENT_SIZE));
StdIndexedValue local(tmp);
Value vec = vector_type_cast(tmp);
std_store(vectorValue, vec);
diff --git a/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir b/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
index 5c2da799d861..b19ea9dde793 100644
--- a/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
+++ b/mlir/test/Conversion/VectorToSCF/vector-to-loops.mlir
@@ -68,7 +68,7 @@ func @materialize_read(%M: index, %N: index, %O: index, %P: index) {
// CHECK-NEXT: affine.for %[[I1:.*]] = 0 to %{{.*}} {
// CHECK-NEXT: affine.for %[[I2:.*]] = 0 to %{{.*}} {
// CHECK-NEXT: affine.for %[[I3:.*]] = 0 to %{{.*}} step 5 {
- // CHECK: %[[ALLOC:.*]] = alloc() : memref<5x4x3xf32>
+ // CHECK: %[[ALLOC:.*]] = alloc() {alignment = 128 : i64} : memref<5x4x3xf32>
// CHECK-NEXT: scf.for %[[I4:.*]] = %[[C0]] to %[[C3]] step %[[C1]] {
// CHECK-NEXT: scf.for %[[I5:.*]] = %[[C0]] to %[[C4]] step %[[C1]] {
// CHECK-NEXT: scf.for %[[I6:.*]] = %[[C0]] to %[[C5]] step %[[C1]] {
@@ -145,7 +145,7 @@ func @materialize_write(%M: index, %N: index, %O: index, %P: index) {
// CHECK-NEXT: affine.for %[[I1:.*]] = 0 to %{{.*}} step 4 {
// CHECK-NEXT: affine.for %[[I2:.*]] = 0 to %{{.*}} {
// CHECK-NEXT: affine.for %[[I3:.*]] = 0 to %{{.*}} step 5 {
- // CHECK: %[[ALLOC:.*]] = alloc() : memref<5x4x3xf32>
+ // CHECK: %[[ALLOC:.*]] = alloc() {alignment = 128 : i64} : memref<5x4x3xf32>
// CHECK-NEXT: %[[VECTOR_VIEW:.*]] = vector.type_cast {{.*}} : memref<5x4x3xf32>
// CHECK: store %{{.*}}, {{.*}} : memref<vector<5x4x3xf32>>
// CHECK-NEXT: scf.for %[[I4:.*]] = %[[C0]] to %[[C3]] step %[[C1]] {