[llvm] 39f2d9a - [Matrix] Add option to use row-major matrix layout as default.

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 6 02:06:27 PDT 2020


Author: Florian Hahn
Date: 2020-04-06T10:00:56+01:00
New Revision: 39f2d9aa81a89510ff41151e3fbe329c06218872

URL: https://github.com/llvm/llvm-project/commit/39f2d9aa81a89510ff41151e3fbe329c06218872
DIFF: https://github.com/llvm/llvm-project/commit/39f2d9aa81a89510ff41151e3fbe329c06218872.diff

LOG: [Matrix] Add option to use row-major matrix layout as default.

This patch adds a -matrix-default-layout option which can be used to
set the default matrix layout to row-major or column-major (default).

The initial patch updates codegen for loads, stores, binary operators
and matrix multiply.
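
For illustration only (this sketch is not part of the patch; the enum and
helper names below are made up), the option only changes how matrix elements
are mapped onto the flat vectors the intrinsics operate on, i.e. whether the
stride between the lowered vectors is the number of rows or the number of
columns:

  // Standalone C++ sketch of the index arithmetic the lowering performs in
  // computeVectorAddr, done here on plain integers instead of IR values.
  #include <cstdio>

  enum class Layout { ColumnMajor, RowMajor }; // mirrors the new MatrixLayoutTy

  unsigned flatIndex(unsigned Row, unsigned Col, unsigned NumRows,
                     unsigned NumCols, Layout L) {
    return L == Layout::ColumnMajor ? Col * NumRows + Row  // stride = NumRows
                                    : Row * NumCols + Col; // stride = NumColumns
  }

  int main() {
    // Element (0, 2) of a 2x3 matrix lands at different flat positions:
    std::printf("%u\n", flatIndex(0, 2, 2, 3, Layout::ColumnMajor)); // prints 4
    std::printf("%u\n", flatIndex(0, 2, 2, 3, Layout::RowMajor));    // prints 2
  }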

Reviewers: anemet, Gerolf, andrew.w.kaylor, LuoYuanke

Reviewed By: anemet

Differential Revision: https://reviews.llvm.org/D76325

Added: 
    llvm/test/Transforms/LowerMatrixIntrinsics/multiply-add-sub-double-row-major.ll
    llvm/test/Transforms/LowerMatrixIntrinsics/multiply-double-row-major.ll
    llvm/test/Transforms/LowerMatrixIntrinsics/multiply-i32-row-major.ll

Modified: 
    llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
    llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-multiple-blocks.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
index e0110576e4b0..81ac2b423c91 100644
--- a/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerMatrixIntrinsics.cpp
@@ -67,6 +67,16 @@ static cl::opt<bool> AllowContractEnabled(
     cl::desc("Allow the use of FMAs if available and profitable. This may "
              "result in 
diff erent results, due to less rounding error."));
 
+enum class MatrixLayoutTy { ColumnMajor, RowMajor };
+
+static cl::opt<MatrixLayoutTy> MatrixLayout(
+    "matrix-default-layout", cl::init(MatrixLayoutTy::ColumnMajor),
+    cl::desc("Sets the default matrix layout"),
+    cl::values(clEnumValN(MatrixLayoutTy::ColumnMajor, "column-major",
+                          "Use column-major layout"),
+               clEnumValN(MatrixLayoutTy::RowMajor, "row-major",
+                          "Use row-major layout")));
+
 /// Helper function to either return Scope, if it is a subprogram or the
 /// attached subprogram for a local scope.
 static DISubprogram *getSubprogram(DIScope *Scope) {
@@ -77,12 +87,17 @@ static DISubprogram *getSubprogram(DIScope *Scope) {
 
 namespace {
 
-// Given an element poitner \p BasePtr to the start of a (sub) matrix, compute
-// the start address of column \p Col with type (\p EltType x \p NumRows)
-// assuming \p Stride elements between start two consecutive columns.
-// \p Stride must be >= \p NumRows.
+// Given an element pointer \p BasePtr to the start of a (sub) matrix, compute
+// the start address of vector \p VecIdx with type (\p EltType x \p NumElements)
+// assuming \p Stride elements between the starts of two consecutive vectors.
+// \p Stride must be >= \p NumElements.
+// For column-major matrixes, the function computes the address of a column
+// vector and \p NumElements must be set to the number of elements in a column
+// (= number of rows of the matrix). For row-major matrixes, the function
+// computes the address of a row vector and \p NumElements must be set to the
+// number of elements in a row (= number of columns of the matrix).
 //
-// Consider a 4x4 matrix like below
+// Consider a 4x4 matrix in column-major layout like below
 //
 //      0       1      2      3
 // 0   v_0_0  v_0_1  v_0_2  v_0_3
@@ -92,14 +107,14 @@ namespace {
 
 // To compute the column addresses for a 2x3 sub-matrix at row 1 and column 1,
 // we need a pointer to the first element of the submatrix as base pointer.
-// Then we can use computeColumnAddr to compute the addresses for the columns
+// Then we can use computeVectorAddr to compute the addresses for the columns
 // of the sub-matrix.
 //
-// Column 0: computeColumnAddr(Base, 0 (column), 4 (stride), 2 (num rows), ..)
+// Column 0: computeVectorAddr(Base, 0 (column), 4 (stride), 2 (num rows), ..)
 //           -> just returns Base
-// Column 1: computeColumnAddr(Base, 1 (column), 4 (stride), 2 (num rows), ..)
+// Column 1: computeVectorAddr(Base, 1 (column), 4 (stride), 2 (num rows), ..)
 //           -> returns Base + (1 * 4)
-// Column 2: computeColumnAddr(Base, 2 (column), 4 (stride), 2 (num rows), ..)
+// Column 2: computeVectorAddr(Base, 2 (column), 4 (stride), 2 (num rows), ..)
 //           -> returns Base + (2 * 4)
 //
 // The graphic below illustrates the number of elements in a column (marked
@@ -112,30 +127,30 @@ namespace {
 //         v_2_0 |v_2_1 |v_2_2 |v_2_3
 //         v_3_0 {v_3_1 {v_3_2  v_3_3
 //
-Value *computeColumnAddr(Value *BasePtr, Value *Col, Value *Stride,
-                         unsigned NumRows, Type *EltType,
+Value *computeVectorAddr(Value *BasePtr, Value *VecIdx, Value *Stride,
+                         unsigned NumElements, Type *EltType,
                          IRBuilder<> &Builder) {
 
   assert((!isa<ConstantInt>(Stride) ||
-          cast<ConstantInt>(Stride)->getZExtValue() >= NumRows) &&
-         "Stride must be >= the number of rows.");
+          cast<ConstantInt>(Stride)->getZExtValue() >= NumElements) &&
+         "Stride must be >= the number of elements in the result vector.");
   unsigned AS = cast<PointerType>(BasePtr->getType())->getAddressSpace();
 
-  // Compute the start of the column with index Col as Col * Stride.
-  Value *ColumnStart = Builder.CreateMul(Col, Stride, "col.start");
+  // Compute the start of the vector with index VecIdx as VecIdx * Stride.
+  Value *VecStart = Builder.CreateMul(VecIdx, Stride, "vec.start");
 
-  // Get pointer to the start of the selected column. Skip GEP creation,
-  // if we select column 0.
-  if (isa<ConstantInt>(ColumnStart) && cast<ConstantInt>(ColumnStart)->isZero())
-    ColumnStart = BasePtr;
+  // Get pointer to the start of the selected vector. Skip GEP creation,
+  // if we select vector 0.
+  if (isa<ConstantInt>(VecStart) && cast<ConstantInt>(VecStart)->isZero())
+    VecStart = BasePtr;
   else
-    ColumnStart = Builder.CreateGEP(EltType, BasePtr, ColumnStart, "col.gep");
+    VecStart = Builder.CreateGEP(EltType, BasePtr, VecStart, "vec.gep");
 
-  // Cast elementwise column start pointer to a pointer to a column
-  // (EltType x NumRows)*.
-  Type *ColumnType = VectorType::get(EltType, NumRows);
-  Type *ColumnPtrType = PointerType::get(ColumnType, AS);
-  return Builder.CreatePointerCast(ColumnStart, ColumnPtrType, "col.cast");
+  // Cast elementwise vector start pointer to a pointer to a vector
+  // (EltType x NumElements)*.
+  Type *VecType = VectorType::get(EltType, NumElements);
+  Type *VecPtrType = PointerType::get(VecType, AS);
+  return Builder.CreatePointerCast(VecStart, VecPtrType, "vec.cast");
 }
 
 /// LowerMatrixIntrinsics contains the methods used to lower matrix intrinsics.
@@ -143,7 +158,8 @@ Value *computeColumnAddr(Value *BasePtr, Value *Col, Value *Stride,
 /// Currently, the lowering for each matrix intrinsic is done as follows:
 /// 1. Propagate the shape information from intrinsics to connected
 /// instructions.
-/// 2. Lower instructions with shape information.
+/// 2. Lower instructions with shape information (assuming column-major layout).
+///  The lowering works similarly using row-major layout.
 ///  2.1. Get column vectors for each argument. If we already lowered the
 ///       definition of an argument, use the produced column vectors directly.
 ///       If not, split the operand vector containing an embedded matrix into
@@ -196,20 +212,41 @@ class LowerMatrixIntrinsics {
     bool IsColumnMajor = true;
 
   public:
-    MatrixTy() : Vectors() {}
+    MatrixTy()
+        : Vectors(),
+          IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
     MatrixTy(ArrayRef<Value *> Vectors)
-        : Vectors(Vectors.begin(), Vectors.end()) {}
+        : Vectors(Vectors.begin(), Vectors.end()),
+          IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
+    MatrixTy(unsigned NumRows, unsigned NumColumns, Type *EltTy)
+        : IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {
+
+      unsigned D = isColumnMajor() ? NumColumns : NumRows;
+      for (unsigned J = 0; J < D; ++J)
+        addVector(UndefValue::get(
+            VectorType::get(EltTy, isColumnMajor() ? NumRows : NumColumns)));
+    }
 
     Value *getVector(unsigned i) const { return Vectors[i]; }
     Value *getColumn(unsigned i) const {
       assert(isColumnMajor() && "only supported for column-major matrixes");
       return Vectors[i];
     }
+    Value *getRow(unsigned i) const {
+      assert(!isColumnMajor() && "only supported for row-major matrixes");
+      return Vectors[i];
+    }
 
-    void setColumn(unsigned i, Value *V) { Vectors[i] = V; }
+    void setVector(unsigned i, Value *V) { Vectors[i] = V; }
 
     Type *getElementType() { return getVectorTy()->getElementType(); }
 
+    unsigned getNumVectors() const {
+      if (isColumnMajor())
+        return getNumColumns();
+      return getNumRows();
+    }
+
     unsigned getNumColumns() const {
       if (isColumnMajor())
         return Vectors.size();
@@ -226,12 +263,7 @@ class LowerMatrixIntrinsics {
         return Vectors.size();
     }
 
-    const SmallVectorImpl<Value *> &getColumnVectors() const { return Vectors; }
-
-    SmallVectorImpl<Value *> &getColumnVectors() { return Vectors; }
-
-    void addColumn(Value *V) { Vectors.push_back(V); }
-
+    void addVector(Value *V) { Vectors.push_back(V); }
     VectorType *getColumnTy() {
       assert(isColumnMajor() && "only supported for column-major matrixes");
       return getVectorTy();
@@ -242,10 +274,16 @@ class LowerMatrixIntrinsics {
     }
 
     iterator_range<SmallVector<Value *, 8>::iterator> columns() {
+      assert(isColumnMajor() &&
+             "columns() only supported for column-major matrixes");
+      return make_range(Vectors.begin(), Vectors.end());
+    }
+
+    iterator_range<SmallVector<Value *, 8>::iterator> vectors() {
       return make_range(Vectors.begin(), Vectors.end());
     }
 
-    /// Embed the columns of the matrix into a flat vector by concatenating
+    /// Embed the vectors of the matrix into a flat vector by concatenating
     /// them.
     Value *embedInVector(IRBuilder<> &Builder) const {
       return Vectors.size() == 1 ? Vectors[0]
@@ -276,18 +314,39 @@ class LowerMatrixIntrinsics {
     const OpInfoTy &getOpInfo() const { return OpInfo; }
 
     bool isColumnMajor() const { return IsColumnMajor; }
+
+    unsigned getStride() const {
+      if (isColumnMajor())
+        return getNumRows();
+      return getNumColumns();
+    }
+
+    /// Extract a vector of \p NumElts starting at index (\p I, \p J). If the
+    /// matrix is column-major, the result vector is extracted from a column
+    /// vector, otherwise from a row vector.
+    Value *extractVector(unsigned I, unsigned J, unsigned NumElts,
+                         IRBuilder<> &Builder) const {
+      Value *Vec = isColumnMajor() ? getColumn(J) : getRow(I);
+      Value *Undef = UndefValue::get(Vec->getType());
+      Constant *Mask =
+          createSequentialMask(Builder, isColumnMajor() ? I : J, NumElts, 0);
+      return Builder.CreateShuffleVector(Vec, Undef, Mask, "block");
+    }
   };
 
   struct ShapeInfo {
     unsigned NumRows;
     unsigned NumColumns;
 
+    bool IsColumnMajor;
+
     ShapeInfo(unsigned NumRows = 0, unsigned NumColumns = 0)
-        : NumRows(NumRows), NumColumns(NumColumns) {}
+        : NumRows(NumRows), NumColumns(NumColumns),
+          IsColumnMajor(MatrixLayout == MatrixLayoutTy::ColumnMajor) {}
 
     ShapeInfo(Value *NumRows, Value *NumColumns)
-        : NumRows(cast<ConstantInt>(NumRows)->getZExtValue()),
-          NumColumns(cast<ConstantInt>(NumColumns)->getZExtValue()) {}
+        : ShapeInfo(cast<ConstantInt>(NumRows)->getZExtValue(),
+                    cast<ConstantInt>(NumColumns)->getZExtValue()) {}
 
     bool operator==(const ShapeInfo &other) {
       return NumRows == other.NumRows && NumColumns == other.NumColumns;
@@ -300,6 +359,18 @@ class LowerMatrixIntrinsics {
       assert(NumRows == 0 || NumColumns != 0);
       return NumRows != 0;
     }
+
+    unsigned getStride() const {
+      if (IsColumnMajor)
+        return NumRows;
+      return NumColumns;
+    }
+
+    unsigned getNumVectors() const {
+      if (IsColumnMajor)
+        return NumColumns;
+      return NumRows;
+    }
   };
 
   /// Maps instructions to their shape information. The shape information
@@ -339,11 +410,11 @@ class LowerMatrixIntrinsics {
                      double(TTI.getRegisterBitWidth(true)));
   }
 
-  /// Return the set of column vectors that a matrix value is lowered to.
+  /// Return the set of vectors that a matrix value is lowered to.
   ///
-  /// If we lowered \p MatrixVal, just return the cache result column matrix.
-  /// Otherwie split the flat vector \p MatrixVal containing a matrix with
-  /// shape \p SI into column vectors.
+  /// If we lowered \p MatrixVal, just return the cached result matrix. Otherwise
+  /// split the flat vector \p MatrixVal containing a matrix with shape \p SI
+  /// into vectors.
   MatrixTy getMatrix(Value *MatrixVal, const ShapeInfo &SI,
                      IRBuilder<> &Builder) {
     VectorType *VType = dyn_cast<VectorType>(MatrixVal->getType());
@@ -352,7 +423,7 @@ class LowerMatrixIntrinsics {
            "The vector size must match the number of matrix elements");
 
     // Check if we lowered MatrixVal using shape information. In that case,
-    // return the existing column matrix, if it matches the requested shape
+    // return the existing matrix, if it matches the requested shape
     // information. If there is a mis-match, embed the result in a flat
     // vector and split it later.
     auto Found = Inst2ColumnMatrix.find(MatrixVal);
@@ -370,8 +441,9 @@ class LowerMatrixIntrinsics {
     SmallVector<Value *, 16> SplitVecs;
     Value *Undef = UndefValue::get(VType);
     for (unsigned MaskStart = 0; MaskStart < VType->getNumElements();
-         MaskStart += SI.NumRows) {
-      Constant *Mask = createSequentialMask(Builder, MaskStart, SI.NumRows, 0);
+         MaskStart += SI.getStride()) {
+      Constant *Mask =
+          createSequentialMask(Builder, MaskStart, SI.getStride(), 0);
       Value *V = Builder.CreateShuffleVector(MatrixVal, Undef, Mask, "split");
       SplitVecs.push_back(V);
     }
@@ -660,19 +732,18 @@ class LowerMatrixIntrinsics {
     return Changed;
   }
 
-  LoadInst *createColumnLoad(Value *ColumnPtr, Type *EltType,
+  LoadInst *createVectorLoad(Value *ColumnPtr, Type *EltType,
                              IRBuilder<> &Builder) {
     return Builder.CreateAlignedLoad(
         ColumnPtr, Align(DL.getABITypeAlignment(EltType)), "col.load");
   }
 
-  StoreInst *createColumnStore(Value *ColumnValue, Value *ColumnPtr,
+  StoreInst *createVectorStore(Value *ColumnValue, Value *ColumnPtr,
                                Type *EltType, IRBuilder<> &Builder) {
     return Builder.CreateAlignedStore(ColumnValue, ColumnPtr,
                                       DL.getABITypeAlign(EltType));
   }
 
-
   /// Turns \p BasePtr into an elementwise pointer to \p EltType.
   Value *createElementPtr(Value *BasePtr, Type *EltType, IRBuilder<> &Builder) {
     unsigned AS = cast<PointerType>(BasePtr->getType())->getAddressSpace();
@@ -705,22 +776,21 @@ class LowerMatrixIntrinsics {
   }
 
   /// Load a matrix with \p Shape starting at \p Ptr and using \p Stride between
-  /// columns.
+  /// vectors.
   MatrixTy loadMatrix(Type *Ty, Value *Ptr, Value *Stride, ShapeInfo Shape,
                       IRBuilder<> &Builder) {
     auto VType = cast<VectorType>(Ty);
     Value *EltPtr = createElementPtr(Ptr, VType->getElementType(), Builder);
     MatrixTy Result;
-    // Distance between start of one column and the start of the next
-    for (unsigned C = 0, E = Shape.NumColumns; C < E; ++C) {
-      Value *GEP =
-          computeColumnAddr(EltPtr, Builder.getInt32(C), Stride, Shape.NumRows,
-                            VType->getElementType(), Builder);
-      Value *Column = createColumnLoad(GEP, VType->getElementType(), Builder);
-      Result.addColumn(Column);
+    for (unsigned I = 0, E = Shape.getNumVectors(); I < E; ++I) {
+      Value *GEP = computeVectorAddr(EltPtr, Builder.getInt32(I), Stride,
+                                     Shape.getStride(), VType->getElementType(),
+                                     Builder);
+      Value *Vector = createVectorLoad(GEP, VType->getElementType(), Builder);
+      Result.addVector(Vector);
     }
-    return Result.addNumLoads(getNumOps(Result.getColumnTy()) *
-                              Result.getNumColumns());
+    return Result.addNumLoads(getNumOps(Result.getVectorTy()) *
+                              Result.getNumVectors());
   }
 
   /// Loads a sub-matrix with shape \p ResultShape from a \p R x \p C matrix,
@@ -731,7 +801,7 @@ class LowerMatrixIntrinsics {
 
     Value *Offset = Builder.CreateAdd(
         Builder.CreateMul(Builder.getInt32(J),
-                          Builder.getInt32(MatrixShape.NumRows)),
+                          Builder.getInt32(MatrixShape.getStride())),
         Builder.getInt32(I));
 
     unsigned AS = cast<PointerType>(MatrixPtr->getType())->getAddressSpace();
@@ -744,8 +814,9 @@ class LowerMatrixIntrinsics {
     Value *TilePtr =
         Builder.CreatePointerCast(TileStart, TilePtrTy, "col.cast");
 
-    return loadMatrix(TileTy, TilePtr, Builder.getInt32(MatrixShape.NumRows),
-                      ResultShape, Builder);
+    return loadMatrix(TileTy, TilePtr,
+                      Builder.getInt32(MatrixShape.getStride()), ResultShape,
+                      Builder);
   }
 
   /// Lower a load instruction with shape information.
@@ -761,6 +832,8 @@ class LowerMatrixIntrinsics {
   ///
   /// The intrinsic loads a matrix from memory using a stride between columns.
   void LowerColumnwiseLoad(CallInst *Inst) {
+    assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
+           "Intrinsic only supports column-major layout!");
     Value *Ptr = Inst->getArgOperand(0);
     Value *Stride = Inst->getArgOperand(1);
     LowerLoad(Inst, Ptr, Stride,
@@ -774,7 +847,7 @@ class LowerMatrixIntrinsics {
                    IRBuilder<> &Builder) {
     Value *Offset = Builder.CreateAdd(
         Builder.CreateMul(Builder.getInt32(J),
-                          Builder.getInt32(MatrixShape.NumRows)),
+                          Builder.getInt32(MatrixShape.getStride())),
         Builder.getInt32(I));
 
     unsigned AS = cast<PointerType>(MatrixPtr->getType())->getAddressSpace();
@@ -788,23 +861,23 @@ class LowerMatrixIntrinsics {
         Builder.CreatePointerCast(TileStart, TilePtrTy, "col.cast");
 
     storeMatrix(TileTy, StoreVal, TilePtr,
-                Builder.getInt32(MatrixShape.NumRows), Builder);
+                Builder.getInt32(MatrixShape.getStride()), Builder);
   }
 
   /// Store matrix \p StoreVal starting at \p Ptr and using \p Stride between
-  /// columns.
+  /// vectors.
   MatrixTy storeMatrix(Type *Ty, MatrixTy StoreVal, Value *Ptr, Value *Stride,
                        IRBuilder<> &Builder) {
     auto VType = cast<VectorType>(Ty);
     Value *EltPtr = createElementPtr(Ptr, VType->getElementType(), Builder);
-    for (auto C : enumerate(StoreVal.columns())) {
-      Value *GEP = computeColumnAddr(EltPtr, Builder.getInt32(C.index()),
-                                     Stride, StoreVal.getNumRows(),
+    for (auto Vec : enumerate(StoreVal.vectors())) {
+      Value *GEP = computeVectorAddr(EltPtr, Builder.getInt32(Vec.index()),
+                                     Stride, StoreVal.getStride(),
                                      VType->getElementType(), Builder);
-      createColumnStore(C.value(), GEP, VType->getElementType(), Builder);
+      createVectorStore(Vec.value(), GEP, VType->getElementType(), Builder);
     }
-    return MatrixTy().addNumStores(getNumOps(StoreVal.getColumnTy()) *
-                                   StoreVal.getNumColumns());
+    return MatrixTy().addNumStores(getNumOps(StoreVal.getVectorTy()) *
+                                   StoreVal.getNumVectors());
   }
 
   /// Lower a store instruction with shape information.
@@ -821,6 +894,8 @@ class LowerMatrixIntrinsics {
   ///
   /// The intrinsic stores a matrix back to memory using a stride between columns.
   void LowerColumnwiseStore(CallInst *Inst) {
+    assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
+           "Intrinsic only supports column-major layout!");
     Value *Matrix = Inst->getArgOperand(0);
     Value *Ptr = Inst->getArgOperand(1);
     Value *Stride = Inst->getArgOperand(2);
@@ -828,16 +903,6 @@ class LowerMatrixIntrinsics {
                {Inst->getArgOperand(3), Inst->getArgOperand(4)});
   }
 
-  /// Extract a column vector of \p NumElts starting at index (\p I, \p J) from
-  /// the matrix \p LM represented as a vector of column vectors.
-  Value *extractVector(const MatrixTy &LM, unsigned I, unsigned J,
-                       unsigned NumElts, IRBuilder<> &Builder) {
-    Value *Col = LM.getColumn(J);
-    Value *Undef = UndefValue::get(Col->getType());
-    Constant *Mask = createSequentialMask(Builder, I, NumElts, 0);
-    return Builder.CreateShuffleVector(Col, Undef, Mask, "block");
-  }
-
   // Set elements I..I+NumElts-1 to Block
   Value *insertVector(Value *Col, unsigned I, Value *Block,
                       IRBuilder<> &Builder) {
@@ -931,33 +996,65 @@ class LowerMatrixIntrinsics {
     unsigned C = Result.getNumColumns();
     unsigned M = A.getNumColumns();
 
-    for (unsigned J = 0; J < C; ++J) {
-      unsigned BlockSize = VF;
-
-      // If Result is zero, we don't need to accumulate in the K==0 iteration.
-      bool isSumZero = isa<ConstantAggregateZero>(Result.getColumn(J));
-
-      unsigned NumOps = 0;
-      for (unsigned I = 0; I < R; I += BlockSize) {
-        // Gradually lower the vectorization factor to cover the remainder.
-        while (I + BlockSize > R)
-          BlockSize /= 2;
-
-        Value *Sum =
-            isTiled ? extractVector(Result, I, J, BlockSize, Builder) : nullptr;
-        for (unsigned K = 0; K < M; ++K) {
-          Value *L = extractVector(A, I, K, BlockSize, Builder);
-          Value *RH = Builder.CreateExtractElement(B.getColumn(J), K);
-          Value *Splat = Builder.CreateVectorSplat(BlockSize, RH, "splat");
-          Sum = createMulAdd(isSumZero && K == 0 ? nullptr : Sum, L, Splat,
-                             Result.getElementType()->isFloatingPointTy(),
-                             Builder, AllowContraction, NumOps);
+    bool IsFP = Result.getElementType()->isFloatingPointTy();
+    assert(A.isColumnMajor() == B.isColumnMajor() &&
+           Result.isColumnMajor() == A.isColumnMajor() &&
+           "operands must agree on matrix layout");
+    unsigned NumComputeOps = 0;
+    if (A.isColumnMajor()) {
+      // Multiply columns from the first operand with scalars from the second
+      // operand. Then move along the K axes and accumulate the columns.  With
+      // this the adds can be vectorized without reassociation.
+      for (unsigned J = 0; J < C; ++J) {
+        unsigned BlockSize = VF;
+        // If Result is zero, we don't need to accumulate in the K==0 iteration.
+        bool isSumZero = isa<ConstantAggregateZero>(Result.getColumn(J));
+
+        for (unsigned I = 0; I < R; I += BlockSize) {
+          // Gradually lower the vectorization factor to cover the remainder.
+          while (I + BlockSize > R)
+            BlockSize /= 2;
+
+          Value *Sum = isTiled ? Result.extractVector(I, J, BlockSize, Builder)
+                               : nullptr;
+          for (unsigned K = 0; K < M; ++K) {
+            Value *L = A.extractVector(I, K, BlockSize, Builder);
+            Value *RH = Builder.CreateExtractElement(B.getColumn(J), K);
+            Value *Splat = Builder.CreateVectorSplat(BlockSize, RH, "splat");
+            Sum = createMulAdd(isSumZero && K == 0 ? nullptr : Sum, L, Splat,
+                               Result.getElementType()->isFloatingPointTy(),
+                               Builder, AllowContraction, NumComputeOps);
+          }
+          Result.setVector(J,
+                           insertVector(Result.getVector(J), I, Sum, Builder));
+        }
+      }
+    } else {
+      // Multiply rows from the second operand with scalars from the first
+      // operand. Then move along the K axes and accumulate the rows.  With this
+      // the adds can be vectorized without reassociation.
+      for (unsigned I = 0; I < R; ++I) {
+        unsigned BlockSize = VF;
+        bool isSumZero = isa<ConstantAggregateZero>(Result.getRow(I));
+        for (unsigned J = 0; J < C; J += BlockSize) {
+          // Gradually lower the vectorization factor to cover the remainder.
+          while (J + BlockSize > C)
+            BlockSize /= 2;
+
+          Value *Sum = nullptr;
+          for (unsigned K = 0; K < M; ++K) {
+            Value *R = B.extractVector(K, J, BlockSize, Builder);
+            Value *LH = Builder.CreateExtractElement(A.getVector(I), K);
+            Value *Splat = Builder.CreateVectorSplat(BlockSize, LH, "splat");
+            Sum = createMulAdd(isSumZero && K == 0 ? nullptr : Sum, Splat, R,
+                               IsFP, Builder, AllowContraction, NumComputeOps);
+          }
+          Result.setVector(I,
+                           insertVector(Result.getVector(I), J, Sum, Builder));
         }
-        Result.setColumn(J, insertVector(Result.getColumn(J), I, Sum, Builder));
       }
-
-      Result.addNumComputeOps(NumOps);
     }
+    Result.addNumComputeOps(NumComputeOps);
   }
 
   /// Ensure that the memory in \p Load does not alias \p Store by potentially
@@ -1081,13 +1178,15 @@ class LowerMatrixIntrinsics {
     MatrixTy Res;
     Type *ColumType = VectorType::get(EltType, R);
     for (unsigned I = 0; I < C; ++I)
-      Res.addColumn(ConstantAggregateZero::get(ColumType));
+      Res.addVector(ConstantAggregateZero::get(ColumType));
     return Res;
   }
 
   void emitSIMDTiling(CallInst *MatMul, LoadInst *LoadOp0, LoadInst *LoadOp1,
                       StoreInst *Store,
                       SmallPtrSetImpl<Instruction *> &FusedInsts) {
+    assert(MatrixLayout == MatrixLayoutTy::ColumnMajor &&
+           "Tiling only supported for column-major matrixes at the moment!");
     if (!isFusionProfitable(MatMul))
       return;
 
@@ -1147,7 +1246,8 @@ class LowerMatrixIntrinsics {
   /// are completely eliminated by fusion are added to \p FusedInsts.
   void LowerMatrixMultiplyFused(CallInst *MatMul,
                                 SmallPtrSetImpl<Instruction *> &FusedInsts) {
-    if (!FuseMatrix || !MatMul->hasOneUse())
+    if (!FuseMatrix || !MatMul->hasOneUse() ||
+        MatrixLayout != MatrixLayoutTy::ColumnMajor)
       return;
 
     auto *LoadOp0 = dyn_cast<LoadInst>(MatMul->getOperand(0));
@@ -1181,9 +1281,7 @@ class LowerMatrixIntrinsics {
     assert(LShape.NumColumns == RShape.NumRows);
 
     // Initialize the output
-    MatrixTy Result;
-    for (unsigned J = 0; J < C; ++J)
-      Result.addColumn(UndefValue::get(VectorType::get(EltType, R)));
+    MatrixTy Result(R, C, EltType);
 
     bool AllowContract = AllowContractEnabled || (isa<FPMathOperator>(MatMul) &&
                                                   MatMul->hasAllowContract());
@@ -1199,6 +1297,8 @@ class LowerMatrixIntrinsics {
     VectorType *VectorTy = cast<VectorType>(InputVal->getType());
     ShapeInfo ArgShape(Inst->getArgOperand(1), Inst->getArgOperand(2));
     MatrixTy InputMatrix = getMatrix(InputVal, ArgShape, Builder);
+    assert(InputMatrix.isColumnMajor() &&
+           "Row-major code-gen not supported yet!");
 
     for (unsigned Row = 0; Row < ArgShape.NumRows; ++Row) {
       // Build a single column vector for this row. First initialize it.
@@ -1214,7 +1314,7 @@ class LowerMatrixIntrinsics {
         ResultColumn =
             Builder.CreateInsertElement(ResultColumn, Elt, C.index());
       }
-      Result.addColumn(ResultColumn);
+      Result.addVector(ResultColumn);
     }
 
     // TODO: Improve estimate of operations needed for transposes. Currently we
@@ -1232,7 +1332,7 @@ class LowerMatrixIntrinsics {
     if (I == ShapeMap.end())
       return false;
 
-    LowerLoad(Inst, Ptr, Builder.getInt32(I->second.NumRows), I->second);
+    LowerLoad(Inst, Ptr, Builder.getInt32(I->second.getStride()), I->second);
     return true;
   }
 
@@ -1242,7 +1342,8 @@ class LowerMatrixIntrinsics {
     if (I == ShapeMap.end())
       return false;
 
-    LowerStore(Inst, StoredVal, Ptr, Builder.getInt32(I->second.NumRows), I->second);
+    LowerStore(Inst, StoredVal, Ptr, Builder.getInt32(I->second.getStride()),
+               I->second);
     return true;
   }
 
@@ -1258,12 +1359,15 @@ class LowerMatrixIntrinsics {
     IRBuilder<> Builder(Inst);
     ShapeInfo &Shape = I->second;
 
-    MatrixTy LoweredLhs = getMatrix(Lhs, Shape, Builder);
-    MatrixTy LoweredRhs = getMatrix(Rhs, Shape, Builder);
-
-    // Add each column and store the result back into the opmapping
     MatrixTy Result;
-    auto BuildColumnOp = [&Builder, Inst](Value *LHS, Value *RHS) {
+    MatrixTy A = getMatrix(Lhs, Shape, Builder);
+    MatrixTy B = getMatrix(Rhs, Shape, Builder);
+    assert(A.isColumnMajor() == B.isColumnMajor() &&
+           Result.isColumnMajor() == A.isColumnMajor() &&
+           "operands must agree on matrix layout");
+
+    // Helper to perform binary op on vectors.
+    auto BuildVectorOp = [&Builder, Inst](Value *LHS, Value *RHS) {
       switch (Inst->getOpcode()) {
       case Instruction::Add:
         return Builder.CreateAdd(LHS, RHS);
@@ -1281,13 +1385,13 @@ class LowerMatrixIntrinsics {
         llvm_unreachable("Unsupported binary operator for matrix");
       }
     };
-    for (unsigned C = 0; C < Shape.NumColumns; ++C)
-      Result.addColumn(
-          BuildColumnOp(LoweredLhs.getColumn(C), LoweredRhs.getColumn(C)));
+
+    for (unsigned I = 0; I < Shape.getNumVectors(); ++I)
+      Result.addVector(BuildVectorOp(A.getVector(I), B.getVector(I)));
 
     finalizeLowering(Inst,
-                     Result.addNumComputeOps(getNumOps(Result.getColumnTy()) *
-                                             Result.getNumColumns()),
+                     Result.addNumComputeOps(getNumOps(Result.getVectorTy()) *
+                                             Result.getNumVectors()),
                      Builder);
     return true;
   }
@@ -1302,9 +1406,9 @@ class LowerMatrixIntrinsics {
     unsigned LineLength = 0;
     const DataLayout &DL;
 
-    /// Mapping from instructions to column matrixes. It is used to identify
+    /// Mapping from instructions to matrixes. It is used to identify
     /// matrix instructions.
-    const MapVector<Value *, MatrixTy> &Inst2ColumnMatrix;
+    const MapVector<Value *, MatrixTy> &Inst2Matrix;
 
     /// Mapping from values to the leaves of all expressions that the value is
     /// part of.
@@ -1321,12 +1425,12 @@ class LowerMatrixIntrinsics {
     SmallPtrSet<Value *, 8> ReusedExprs;
 
     ExprLinearizer(const DataLayout &DL,
-                   const MapVector<Value *, MatrixTy> &Inst2ColumnMatrix,
+                   const MapVector<Value *, MatrixTy> &Inst2Matrix,
                    const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared,
                    const SmallSetVector<Value *, 32> &ExprsInSubprogram,
                    Value *Leaf)
-        : Str(), Stream(Str), DL(DL), Inst2ColumnMatrix(Inst2ColumnMatrix),
-          Shared(Shared), ExprsInSubprogram(ExprsInSubprogram), Leaf(Leaf) {}
+        : Str(), Stream(Str), DL(DL), Inst2Matrix(Inst2Matrix), Shared(Shared),
+          ExprsInSubprogram(ExprsInSubprogram), Leaf(Leaf) {}
 
     void indent(unsigned N) {
       LineLength += N;
@@ -1366,8 +1470,8 @@ class LowerMatrixIntrinsics {
     /// If \p V is a matrix value, print its shape as NumRows x NumColumns to
     /// \p SS.
     void prettyPrintMatrixType(Value *V, raw_string_ostream &SS) {
-      auto M = Inst2ColumnMatrix.find(V);
-      if (M == Inst2ColumnMatrix.end())
+      auto M = Inst2Matrix.find(V);
+      if (M == Inst2Matrix.end())
         SS << "unknown";
       else {
         SS << M->second.getNumRows();
@@ -1565,18 +1669,18 @@ class LowerMatrixIntrinsics {
   ///    that multiple leaves can share sub-expressions. Shared subexpressions
   ///    are explicitly marked as shared().
   struct RemarkGenerator {
-    const MapVector<Value *, MatrixTy> &Inst2ColumnMatrix;
+    const MapVector<Value *, MatrixTy> &Inst2Matrix;
     OptimizationRemarkEmitter &ORE;
     Function &Func;
     const DataLayout &DL;
 
-    RemarkGenerator(const MapVector<Value *, MatrixTy> &Inst2ColumnMatrix,
+    RemarkGenerator(const MapVector<Value *, MatrixTy> &Inst2Matrix,
                     OptimizationRemarkEmitter &ORE, Function &Func)
-        : Inst2ColumnMatrix(Inst2ColumnMatrix), ORE(ORE), Func(Func),
+        : Inst2Matrix(Inst2Matrix), ORE(ORE), Func(Func),
           DL(Func.getParent()->getDataLayout()) {}
 
     /// Return all leaves of the expressions in \p ExprsInSubprogram. Those are
-    /// instructions in Inst2ColumnMatrix returning void or without any users in
+    /// instructions in Inst2Matrix returning void or without any users in
     /// \p ExprsInSubprogram. Currently that should only include stores.
     SmallVector<Value *, 4>
     getExpressionLeaves(const SmallSetVector<Value *, 32> &ExprsInSubprogram) {
@@ -1626,7 +1730,7 @@ class LowerMatrixIntrinsics {
       OpInfoTy Count;
 
       auto I = Shared.find(Root);
-      auto CM = Inst2ColumnMatrix.find(Root);
+      auto CM = Inst2Matrix.find(Root);
       if (I->second.size() == 1)
         Count = CM->second.getOpInfo();
       else
@@ -1648,7 +1752,7 @@ class LowerMatrixIntrinsics {
       // the inlinedAt chain. If the function does not have a DISubprogram, we
       // only map them to the containing function.
       MapVector<DISubprogram *, SmallVector<Value *, 8>> Subprog2Exprs;
-      for (auto &KV : Inst2ColumnMatrix) {
+      for (auto &KV : Inst2Matrix) {
         if (Func.getSubprogram()) {
           auto *I = cast<Instruction>(KV.first);
           DILocation *Context = I->getDebugLoc();
@@ -1720,7 +1824,7 @@ class LowerMatrixIntrinsics {
               const DenseMap<Value *, SmallPtrSet<Value *, 2>> &Shared,
               const SmallSetVector<Value *, 32> &ExprsInSubprogram,
               const DataLayout &DL) {
-      ExprLinearizer Lin(DL, Inst2ColumnMatrix, Shared, ExprsInSubprogram, L);
+      ExprLinearizer Lin(DL, Inst2Matrix, Shared, ExprsInSubprogram, L);
       Lin.linearizeExpr(L, 0, false, false);
       return Lin.getResult();
     }
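
As a side note on the multiply lowering above: for illustration only (plain
scalar C++, not code from the pass, with the loop order simplified and the
BlockSize-wide chunking omitted), the row-major branch of emitMatrixMultiply
computes the following, broadcasting scalars from the rows of the first
operand over the rows of the second operand so the adds can still be
vectorized without reassociation:

  #include <cstddef>
  #include <vector>

  using Matrix = std::vector<std::vector<double>>; // [row][column], row-major

  Matrix multiplyRowMajor(const Matrix &A, const Matrix &B) {
    std::size_t R = A.size(), M = A[0].size(), C = B[0].size();
    Matrix Result(R, std::vector<double>(C, 0.0));
    for (std::size_t I = 0; I < R; ++I)     // rows of the result
      for (std::size_t K = 0; K < M; ++K) { // walk the inner dimension
        double Splat = A[I][K];             // scalar from the first operand
        for (std::size_t J = 0; J < C; ++J) // vector-friendly op over row K of B
          Result[I][J] += Splat * B[K][J];
      }
    return Result;
  }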

diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-add-sub-double-row-major.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-add-sub-double-row-major.ll
new file mode 100644
index 000000000000..e449680da269
--- /dev/null
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-add-sub-double-row-major.ll
@@ -0,0 +1,152 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --verbose
+
+; RUN: opt -lower-matrix-intrinsics -matrix-default-layout=row-major -S < %s | FileCheck --check-prefix=RM %s
+
+; Check row-major code generation for loads, stores, binary operators (fadd/fsub) and multiply.
+; %a.ptr is a pointer to a 2x3 matrix, %b.ptr to a 3x2 matrix and %c.ptr to a 2x2 matrix.
+; Load, store and binary operators on %a should operate on 3 element vectors and on 2 element vectors for %b.
+define void @multiply_sub_add_2x3_3x2(<6 x double>* %a.ptr, <6 x double>* %b.ptr, <4 x double>* %c.ptr) {
+; RM-LABEL: @multiply_sub_add_2x3_3x2(
+; RM-NEXT:  entry:
+; RM-NEXT:    [[TMP0:%.*]] = bitcast <6 x double>* [[A_PTR:%.*]] to double*
+; RM-NEXT:    [[VEC_CAST:%.*]] = bitcast double* [[TMP0]] to <3 x double>*
+; RM-NEXT:    [[COL_LOAD:%.*]] = load <3 x double>, <3 x double>* [[VEC_CAST]], align 8
+; RM-NEXT:    [[VEC_GEP:%.*]] = getelementptr double, double* [[TMP0]], i32 3
+; RM-NEXT:    [[VEC_CAST1:%.*]] = bitcast double* [[VEC_GEP]] to <3 x double>*
+; RM-NEXT:    [[COL_LOAD2:%.*]] = load <3 x double>, <3 x double>* [[VEC_CAST1]], align 8
+; RM-NEXT:    [[TMP1:%.*]] = bitcast <6 x double>* [[B_PTR:%.*]] to double*
+; RM-NEXT:    [[VEC_CAST3:%.*]] = bitcast double* [[TMP1]] to <2 x double>*
+; RM-NEXT:    [[COL_LOAD4:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST3]], align 8
+; RM-NEXT:    [[VEC_GEP5:%.*]] = getelementptr double, double* [[TMP1]], i32 2
+; RM-NEXT:    [[VEC_CAST6:%.*]] = bitcast double* [[VEC_GEP5]] to <2 x double>*
+; RM-NEXT:    [[COL_LOAD7:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST6]], align 8
+; RM-NEXT:    [[VEC_GEP8:%.*]] = getelementptr double, double* [[TMP1]], i32 4
+; RM-NEXT:    [[VEC_CAST9:%.*]] = bitcast double* [[VEC_GEP8]] to <2 x double>*
+; RM-NEXT:    [[COL_LOAD10:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST9]], align 8
+; RM-NEXT:    [[TMP2:%.*]] = fadd <3 x double> [[COL_LOAD]], [[COL_LOAD]]
+; RM-NEXT:    [[TMP3:%.*]] = fadd <3 x double> [[COL_LOAD2]], [[COL_LOAD2]]
+; RM-NEXT:    [[TMP4:%.*]] = bitcast <6 x double>* [[A_PTR]] to double*
+; RM-NEXT:    [[VEC_CAST11:%.*]] = bitcast double* [[TMP4]] to <3 x double>*
+; RM-NEXT:    store <3 x double> [[TMP2]], <3 x double>* [[VEC_CAST11]], align 8
+; RM-NEXT:    [[VEC_GEP12:%.*]] = getelementptr double, double* [[TMP4]], i32 3
+; RM-NEXT:    [[VEC_CAST13:%.*]] = bitcast double* [[VEC_GEP12]] to <3 x double>*
+; RM-NEXT:    store <3 x double> [[TMP3]], <3 x double>* [[VEC_CAST13]], align 8
+; RM-NEXT:    [[TMP5:%.*]] = fsub <2 x double> [[COL_LOAD4]], <double 1.000000e+00, double 1.000000e+00>
+; RM-NEXT:    [[TMP6:%.*]] = fsub <2 x double> [[COL_LOAD7]], <double 1.000000e+00, double 1.000000e+00>
+; RM-NEXT:    [[TMP7:%.*]] = fsub <2 x double> [[COL_LOAD10]], <double 1.000000e+00, double 1.000000e+00>
+; RM-NEXT:    [[TMP8:%.*]] = bitcast <6 x double>* [[B_PTR]] to double*
+; RM-NEXT:    [[VEC_CAST14:%.*]] = bitcast double* [[TMP8]] to <2 x double>*
+; RM-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[VEC_CAST14]], align 8
+; RM-NEXT:    [[VEC_GEP15:%.*]] = getelementptr double, double* [[TMP8]], i32 2
+; RM-NEXT:    [[VEC_CAST16:%.*]] = bitcast double* [[VEC_GEP15]] to <2 x double>*
+; RM-NEXT:    store <2 x double> [[TMP6]], <2 x double>* [[VEC_CAST16]], align 8
+; RM-NEXT:    [[VEC_GEP17:%.*]] = getelementptr double, double* [[TMP8]], i32 4
+; RM-NEXT:    [[VEC_CAST18:%.*]] = bitcast double* [[VEC_GEP17]] to <2 x double>*
+; RM-NEXT:    store <2 x double> [[TMP7]], <2 x double>* [[VEC_CAST18]], align 8
+; RM-NEXT:    [[BLOCK:%.*]] = shufflevector <2 x double> [[TMP5]], <2 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP9:%.*]] = extractelement <3 x double> [[TMP2]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <1 x double> undef, double [[TMP9]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP10:%.*]] = fmul <1 x double> [[SPLAT_SPLAT]], [[BLOCK]]
+; RM-NEXT:    [[BLOCK19:%.*]] = shufflevector <2 x double> [[TMP6]], <2 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP11:%.*]] = extractelement <3 x double> [[TMP2]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT20:%.*]] = insertelement <1 x double> undef, double [[TMP11]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT21:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT20]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP12:%.*]] = fmul <1 x double> [[SPLAT_SPLAT21]], [[BLOCK19]]
+; RM-NEXT:    [[TMP13:%.*]] = fadd <1 x double> [[TMP10]], [[TMP12]]
+; RM-NEXT:    [[BLOCK22:%.*]] = shufflevector <2 x double> [[TMP7]], <2 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP14:%.*]] = extractelement <3 x double> [[TMP2]], i64 2
+; RM-NEXT:    [[SPLAT_SPLATINSERT23:%.*]] = insertelement <1 x double> undef, double [[TMP14]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT24:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT23]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP15:%.*]] = fmul <1 x double> [[SPLAT_SPLAT24]], [[BLOCK22]]
+; RM-NEXT:    [[TMP16:%.*]] = fadd <1 x double> [[TMP13]], [[TMP15]]
+; RM-NEXT:    [[TMP17:%.*]] = shufflevector <1 x double> [[TMP16]], <1 x double> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP18:%.*]] = shufflevector <2 x double> undef, <2 x double> [[TMP17]], <2 x i32> <i32 2, i32 1>
+; RM-NEXT:    [[BLOCK25:%.*]] = shufflevector <2 x double> [[TMP5]], <2 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP19:%.*]] = extractelement <3 x double> [[TMP2]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT26:%.*]] = insertelement <1 x double> undef, double [[TMP19]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT27:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT26]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP20:%.*]] = fmul <1 x double> [[SPLAT_SPLAT27]], [[BLOCK25]]
+; RM-NEXT:    [[BLOCK28:%.*]] = shufflevector <2 x double> [[TMP6]], <2 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP21:%.*]] = extractelement <3 x double> [[TMP2]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT29:%.*]] = insertelement <1 x double> undef, double [[TMP21]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT30:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT29]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP22:%.*]] = fmul <1 x double> [[SPLAT_SPLAT30]], [[BLOCK28]]
+; RM-NEXT:    [[TMP23:%.*]] = fadd <1 x double> [[TMP20]], [[TMP22]]
+; RM-NEXT:    [[BLOCK31:%.*]] = shufflevector <2 x double> [[TMP7]], <2 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP24:%.*]] = extractelement <3 x double> [[TMP2]], i64 2
+; RM-NEXT:    [[SPLAT_SPLATINSERT32:%.*]] = insertelement <1 x double> undef, double [[TMP24]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT33:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT32]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP25:%.*]] = fmul <1 x double> [[SPLAT_SPLAT33]], [[BLOCK31]]
+; RM-NEXT:    [[TMP26:%.*]] = fadd <1 x double> [[TMP23]], [[TMP25]]
+; RM-NEXT:    [[TMP27:%.*]] = shufflevector <1 x double> [[TMP26]], <1 x double> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP28:%.*]] = shufflevector <2 x double> [[TMP18]], <2 x double> [[TMP27]], <2 x i32> <i32 0, i32 2>
+; RM-NEXT:    [[BLOCK34:%.*]] = shufflevector <2 x double> [[TMP5]], <2 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP29:%.*]] = extractelement <3 x double> [[TMP3]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT35:%.*]] = insertelement <1 x double> undef, double [[TMP29]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT36:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT35]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP30:%.*]] = fmul <1 x double> [[SPLAT_SPLAT36]], [[BLOCK34]]
+; RM-NEXT:    [[BLOCK37:%.*]] = shufflevector <2 x double> [[TMP6]], <2 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP31:%.*]] = extractelement <3 x double> [[TMP3]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT38:%.*]] = insertelement <1 x double> undef, double [[TMP31]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT39:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT38]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP32:%.*]] = fmul <1 x double> [[SPLAT_SPLAT39]], [[BLOCK37]]
+; RM-NEXT:    [[TMP33:%.*]] = fadd <1 x double> [[TMP30]], [[TMP32]]
+; RM-NEXT:    [[BLOCK40:%.*]] = shufflevector <2 x double> [[TMP7]], <2 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP34:%.*]] = extractelement <3 x double> [[TMP3]], i64 2
+; RM-NEXT:    [[SPLAT_SPLATINSERT41:%.*]] = insertelement <1 x double> undef, double [[TMP34]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT42:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT41]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP35:%.*]] = fmul <1 x double> [[SPLAT_SPLAT42]], [[BLOCK40]]
+; RM-NEXT:    [[TMP36:%.*]] = fadd <1 x double> [[TMP33]], [[TMP35]]
+; RM-NEXT:    [[TMP37:%.*]] = shufflevector <1 x double> [[TMP36]], <1 x double> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP38:%.*]] = shufflevector <2 x double> undef, <2 x double> [[TMP37]], <2 x i32> <i32 2, i32 1>
+; RM-NEXT:    [[BLOCK43:%.*]] = shufflevector <2 x double> [[TMP5]], <2 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP39:%.*]] = extractelement <3 x double> [[TMP3]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT44:%.*]] = insertelement <1 x double> undef, double [[TMP39]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT45:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT44]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP40:%.*]] = fmul <1 x double> [[SPLAT_SPLAT45]], [[BLOCK43]]
+; RM-NEXT:    [[BLOCK46:%.*]] = shufflevector <2 x double> [[TMP6]], <2 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP41:%.*]] = extractelement <3 x double> [[TMP3]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT47:%.*]] = insertelement <1 x double> undef, double [[TMP41]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT48:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT47]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP42:%.*]] = fmul <1 x double> [[SPLAT_SPLAT48]], [[BLOCK46]]
+; RM-NEXT:    [[TMP43:%.*]] = fadd <1 x double> [[TMP40]], [[TMP42]]
+; RM-NEXT:    [[BLOCK49:%.*]] = shufflevector <2 x double> [[TMP7]], <2 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP44:%.*]] = extractelement <3 x double> [[TMP3]], i64 2
+; RM-NEXT:    [[SPLAT_SPLATINSERT50:%.*]] = insertelement <1 x double> undef, double [[TMP44]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT51:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT50]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP45:%.*]] = fmul <1 x double> [[SPLAT_SPLAT51]], [[BLOCK49]]
+; RM-NEXT:    [[TMP46:%.*]] = fadd <1 x double> [[TMP43]], [[TMP45]]
+; RM-NEXT:    [[TMP47:%.*]] = shufflevector <1 x double> [[TMP46]], <1 x double> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP48:%.*]] = shufflevector <2 x double> [[TMP38]], <2 x double> [[TMP47]], <2 x i32> <i32 0, i32 2>
+; RM-NEXT:    [[TMP49:%.*]] = bitcast <4 x double>* [[C_PTR:%.*]] to double*
+; RM-NEXT:    [[VEC_CAST52:%.*]] = bitcast double* [[TMP49]] to <2 x double>*
+; RM-NEXT:    [[COL_LOAD53:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST52]], align 8
+; RM-NEXT:    [[VEC_GEP54:%.*]] = getelementptr double, double* [[TMP49]], i32 2
+; RM-NEXT:    [[VEC_CAST55:%.*]] = bitcast double* [[VEC_GEP54]] to <2 x double>*
+; RM-NEXT:    [[COL_LOAD56:%.*]] = load <2 x double>, <2 x double>* [[VEC_CAST55]], align 8
+; RM-NEXT:    [[TMP50:%.*]] = fsub <2 x double> [[COL_LOAD53]], [[TMP28]]
+; RM-NEXT:    [[TMP51:%.*]] = fsub <2 x double> [[COL_LOAD56]], [[TMP48]]
+; RM-NEXT:    [[TMP52:%.*]] = bitcast <4 x double>* [[C_PTR]] to double*
+; RM-NEXT:    [[VEC_CAST57:%.*]] = bitcast double* [[TMP52]] to <2 x double>*
+; RM-NEXT:    store <2 x double> [[TMP50]], <2 x double>* [[VEC_CAST57]], align 8
+; RM-NEXT:    [[VEC_GEP58:%.*]] = getelementptr double, double* [[TMP52]], i32 2
+; RM-NEXT:    [[VEC_CAST59:%.*]] = bitcast double* [[VEC_GEP58]] to <2 x double>*
+; RM-NEXT:    store <2 x double> [[TMP51]], <2 x double>* [[VEC_CAST59]], align 8
+; RM-NEXT:    ret void
+;
+entry:
+  %a = load <6 x double>, <6 x double>* %a.ptr
+  %b = load <6 x double>, <6 x double>* %b.ptr
+  %add = fadd <6 x double> %a, %a
+  store <6 x double> %add, <6 x double>* %a.ptr
+  %sub = fsub <6 x double> %b, <double 1.0, double 1.0, double 1.0, double 1.0, double 1.0, double 1.0>
+  store <6 x double> %sub, <6 x double>* %b.ptr
+  %mul = call <4 x double> @llvm.matrix.multiply.v4f64.v6f64.v6f64(<6 x double> %add, <6 x double> %sub, i32 2, i32 3, i32 2)
+  %c = load <4 x double>, <4 x double>* %c.ptr
+  %res = fsub <4 x double> %c, %mul
+  store <4 x double> %res, <4 x double>* %c.ptr
+  ret void
+}
+
+declare <4 x double> @llvm.matrix.multiply.v4f64.v6f64.v6f64(<6 x double>, <6 x double>, i32, i32, i32)
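
For reference, the vector widths checked above follow from the row-major shape
bookkeeping: a matrix is lowered to one vector per row, with NumColumns
elements each. A minimal sketch (the struct below is hypothetical and only
mirrors ShapeInfo::getNumVectors/getStride from the pass):

  struct Shape {
    unsigned NumRows, NumColumns;
    bool IsColumnMajor;
    unsigned numVectors() const { return IsColumnMajor ? NumColumns : NumRows; }
    unsigned elementsPerVector() const {
      return IsColumnMajor ? NumRows : NumColumns;
    }
    // For a dense matrix the stride between vector starts equals their length.
    unsigned stride() const { return elementsPerVector(); }
  };
  // Row-major 2x3 operand %a -> 2 vectors of 3 doubles (<3 x double> above).
  // Row-major 3x2 operand %b -> 3 vectors of 2 doubles (<2 x double> above).
  // Row-major 2x2 result     -> 2 vectors of 2 doubles.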

diff --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-double-row-major.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-double-row-major.ll
new file mode 100644
index 000000000000..6f779964da0e
--- /dev/null
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-double-row-major.ll
@@ -0,0 +1,256 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --verbose
+
+; RUN: opt -lower-matrix-intrinsics -matrix-default-layout=row-major -S < %s | FileCheck --check-prefix=RM %s
+
+
+
+define <4 x double> @multiply_2x2(<4 x double> %a, <4 x double> %b) {
+; RM-LABEL: @multiply_2x2(
+; RM-NEXT:  entry:
+; RM-NEXT:    [[SPLIT:%.*]] = shufflevector <4 x double> [[A:%.*]], <4 x double> undef, <2 x i32> <i32 0, i32 1>
+; RM-NEXT:    [[SPLIT1:%.*]] = shufflevector <4 x double> [[A]], <4 x double> undef, <2 x i32> <i32 2, i32 3>
+; RM-NEXT:    [[SPLIT2:%.*]] = shufflevector <4 x double> [[B:%.*]], <4 x double> undef, <2 x i32> <i32 0, i32 1>
+; RM-NEXT:    [[SPLIT3:%.*]] = shufflevector <4 x double> [[B]], <4 x double> undef, <2 x i32> <i32 2, i32 3>
+; RM-NEXT:    [[BLOCK:%.*]] = shufflevector <2 x double> [[SPLIT2]], <2 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP0:%.*]] = extractelement <2 x double> [[SPLIT]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <1 x double> undef, double [[TMP0]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP1:%.*]] = fmul <1 x double> [[SPLAT_SPLAT]], [[BLOCK]]
+; RM-NEXT:    [[BLOCK4:%.*]] = shufflevector <2 x double> [[SPLIT3]], <2 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP2:%.*]] = extractelement <2 x double> [[SPLIT]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT5:%.*]] = insertelement <1 x double> undef, double [[TMP2]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT6:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT5]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP3:%.*]] = fmul <1 x double> [[SPLAT_SPLAT6]], [[BLOCK4]]
+; RM-NEXT:    [[TMP4:%.*]] = fadd <1 x double> [[TMP1]], [[TMP3]]
+; RM-NEXT:    [[TMP5:%.*]] = shufflevector <1 x double> [[TMP4]], <1 x double> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP6:%.*]] = shufflevector <2 x double> undef, <2 x double> [[TMP5]], <2 x i32> <i32 2, i32 1>
+; RM-NEXT:    [[BLOCK7:%.*]] = shufflevector <2 x double> [[SPLIT2]], <2 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP7:%.*]] = extractelement <2 x double> [[SPLIT]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT8:%.*]] = insertelement <1 x double> undef, double [[TMP7]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT9:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT8]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP8:%.*]] = fmul <1 x double> [[SPLAT_SPLAT9]], [[BLOCK7]]
+; RM-NEXT:    [[BLOCK10:%.*]] = shufflevector <2 x double> [[SPLIT3]], <2 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP9:%.*]] = extractelement <2 x double> [[SPLIT]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT11:%.*]] = insertelement <1 x double> undef, double [[TMP9]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT12:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT11]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP10:%.*]] = fmul <1 x double> [[SPLAT_SPLAT12]], [[BLOCK10]]
+; RM-NEXT:    [[TMP11:%.*]] = fadd <1 x double> [[TMP8]], [[TMP10]]
+; RM-NEXT:    [[TMP12:%.*]] = shufflevector <1 x double> [[TMP11]], <1 x double> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP13:%.*]] = shufflevector <2 x double> [[TMP6]], <2 x double> [[TMP12]], <2 x i32> <i32 0, i32 2>
+; RM-NEXT:    [[BLOCK13:%.*]] = shufflevector <2 x double> [[SPLIT2]], <2 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP14:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT14:%.*]] = insertelement <1 x double> undef, double [[TMP14]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT15:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT14]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP15:%.*]] = fmul <1 x double> [[SPLAT_SPLAT15]], [[BLOCK13]]
+; RM-NEXT:    [[BLOCK16:%.*]] = shufflevector <2 x double> [[SPLIT3]], <2 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP16:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT17:%.*]] = insertelement <1 x double> undef, double [[TMP16]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT18:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT17]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP17:%.*]] = fmul <1 x double> [[SPLAT_SPLAT18]], [[BLOCK16]]
+; RM-NEXT:    [[TMP18:%.*]] = fadd <1 x double> [[TMP15]], [[TMP17]]
+; RM-NEXT:    [[TMP19:%.*]] = shufflevector <1 x double> [[TMP18]], <1 x double> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP20:%.*]] = shufflevector <2 x double> undef, <2 x double> [[TMP19]], <2 x i32> <i32 2, i32 1>
+; RM-NEXT:    [[BLOCK19:%.*]] = shufflevector <2 x double> [[SPLIT2]], <2 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP21:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT20:%.*]] = insertelement <1 x double> undef, double [[TMP21]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT21:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT20]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP22:%.*]] = fmul <1 x double> [[SPLAT_SPLAT21]], [[BLOCK19]]
+; RM-NEXT:    [[BLOCK22:%.*]] = shufflevector <2 x double> [[SPLIT3]], <2 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP23:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT23:%.*]] = insertelement <1 x double> undef, double [[TMP23]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT24:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT23]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP24:%.*]] = fmul <1 x double> [[SPLAT_SPLAT24]], [[BLOCK22]]
+; RM-NEXT:    [[TMP25:%.*]] = fadd <1 x double> [[TMP22]], [[TMP24]]
+; RM-NEXT:    [[TMP26:%.*]] = shufflevector <1 x double> [[TMP25]], <1 x double> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP27:%.*]] = shufflevector <2 x double> [[TMP20]], <2 x double> [[TMP26]], <2 x i32> <i32 0, i32 2>
+; RM-NEXT:    [[TMP28:%.*]] = shufflevector <2 x double> [[TMP13]], <2 x double> [[TMP27]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; RM-NEXT:    ret <4 x double> [[TMP28]]
+;
+entry:
+  %c = call <4 x double> @llvm.matrix.multiply.v4f64.v4f64.v4f64(<4 x double> %a, <4 x double> %b, i32 2, i32 2, i32 2)
+  ret <4 x double> %c
+}
+
+declare <4 x double> @llvm.matrix.multiply.v4f64.v4f64.v4f64(<4 x double>, <4 x double>, i32, i32, i32)
+
+define <4 x double> @multiply_1x2(<2 x double> %a, <2 x double> %b) {
+
+; RM-LABEL: @multiply_1x2(
+; RM-NEXT:  entry:
+; RM-NEXT:    [[SPLIT:%.*]] = shufflevector <2 x double> [[A:%.*]], <2 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[SPLIT1:%.*]] = shufflevector <2 x double> [[A]], <2 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[SPLIT2:%.*]] = shufflevector <2 x double> [[B:%.*]], <2 x double> undef, <2 x i32> <i32 0, i32 1>
+; RM-NEXT:    [[BLOCK:%.*]] = shufflevector <2 x double> [[SPLIT2]], <2 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP0:%.*]] = extractelement <1 x double> [[SPLIT]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <1 x double> undef, double [[TMP0]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP1:%.*]] = fmul <1 x double> [[SPLAT_SPLAT]], [[BLOCK]]
+; RM-NEXT:    [[TMP2:%.*]] = shufflevector <1 x double> [[TMP1]], <1 x double> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP3:%.*]] = shufflevector <2 x double> undef, <2 x double> [[TMP2]], <2 x i32> <i32 2, i32 1>
+; RM-NEXT:    [[BLOCK3:%.*]] = shufflevector <2 x double> [[SPLIT2]], <2 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP4:%.*]] = extractelement <1 x double> [[SPLIT]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT4:%.*]] = insertelement <1 x double> undef, double [[TMP4]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT5:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT4]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP5:%.*]] = fmul <1 x double> [[SPLAT_SPLAT5]], [[BLOCK3]]
+; RM-NEXT:    [[TMP6:%.*]] = shufflevector <1 x double> [[TMP5]], <1 x double> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP7:%.*]] = shufflevector <2 x double> [[TMP3]], <2 x double> [[TMP6]], <2 x i32> <i32 0, i32 2>
+; RM-NEXT:    [[BLOCK6:%.*]] = shufflevector <2 x double> [[SPLIT2]], <2 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP8:%.*]] = extractelement <1 x double> [[SPLIT1]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT7:%.*]] = insertelement <1 x double> undef, double [[TMP8]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT8:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT7]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP9:%.*]] = fmul <1 x double> [[SPLAT_SPLAT8]], [[BLOCK6]]
+; RM-NEXT:    [[TMP10:%.*]] = shufflevector <1 x double> [[TMP9]], <1 x double> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP11:%.*]] = shufflevector <2 x double> undef, <2 x double> [[TMP10]], <2 x i32> <i32 2, i32 1>
+; RM-NEXT:    [[BLOCK9:%.*]] = shufflevector <2 x double> [[SPLIT2]], <2 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP12:%.*]] = extractelement <1 x double> [[SPLIT1]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT10:%.*]] = insertelement <1 x double> undef, double [[TMP12]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT11:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT10]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP13:%.*]] = fmul <1 x double> [[SPLAT_SPLAT11]], [[BLOCK9]]
+; RM-NEXT:    [[TMP14:%.*]] = shufflevector <1 x double> [[TMP13]], <1 x double> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP15:%.*]] = shufflevector <2 x double> [[TMP11]], <2 x double> [[TMP14]], <2 x i32> <i32 0, i32 2>
+; RM-NEXT:    [[TMP16:%.*]] = shufflevector <2 x double> [[TMP7]], <2 x double> [[TMP15]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; RM-NEXT:    ret <4 x double> [[TMP16]]
+;
+entry:
+  %c = call <4 x double> @llvm.matrix.multiply.v4f64.v2f64.v2f64(<2 x double> %a, <2 x double> %b, i32 2, i32 1, i32 2)
+  ret <4 x double> %c
+}
+
+declare <4 x double> @llvm.matrix.multiply.v4f64.v2f64.v2f64(<2 x double>, <2 x double>, i32, i32, i32)
+
+define <9 x double> @multiply_2x3(<6 x double> %a, <6 x double> %b) {
+; RM-LABEL: @multiply_2x3(
+; RM-NEXT:  entry:
+; RM-NEXT:    [[SPLIT:%.*]] = shufflevector <6 x double> [[A:%.*]], <6 x double> undef, <2 x i32> <i32 0, i32 1>
+; RM-NEXT:    [[SPLIT1:%.*]] = shufflevector <6 x double> [[A]], <6 x double> undef, <2 x i32> <i32 2, i32 3>
+; RM-NEXT:    [[SPLIT2:%.*]] = shufflevector <6 x double> [[A]], <6 x double> undef, <2 x i32> <i32 4, i32 5>
+; RM-NEXT:    [[SPLIT3:%.*]] = shufflevector <6 x double> [[B:%.*]], <6 x double> undef, <3 x i32> <i32 0, i32 1, i32 2>
+; RM-NEXT:    [[SPLIT4:%.*]] = shufflevector <6 x double> [[B]], <6 x double> undef, <3 x i32> <i32 3, i32 4, i32 5>
+; RM-NEXT:    [[BLOCK:%.*]] = shufflevector <3 x double> [[SPLIT3]], <3 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP0:%.*]] = extractelement <2 x double> [[SPLIT]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <1 x double> undef, double [[TMP0]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP1:%.*]] = fmul <1 x double> [[SPLAT_SPLAT]], [[BLOCK]]
+; RM-NEXT:    [[BLOCK5:%.*]] = shufflevector <3 x double> [[SPLIT4]], <3 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP2:%.*]] = extractelement <2 x double> [[SPLIT]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT6:%.*]] = insertelement <1 x double> undef, double [[TMP2]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT7:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT6]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP3:%.*]] = fmul <1 x double> [[SPLAT_SPLAT7]], [[BLOCK5]]
+; RM-NEXT:    [[TMP4:%.*]] = fadd <1 x double> [[TMP1]], [[TMP3]]
+; RM-NEXT:    [[TMP5:%.*]] = shufflevector <1 x double> [[TMP4]], <1 x double> undef, <3 x i32> <i32 0, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP6:%.*]] = shufflevector <3 x double> undef, <3 x double> [[TMP5]], <3 x i32> <i32 3, i32 1, i32 2>
+; RM-NEXT:    [[BLOCK8:%.*]] = shufflevector <3 x double> [[SPLIT3]], <3 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP7:%.*]] = extractelement <2 x double> [[SPLIT]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT9:%.*]] = insertelement <1 x double> undef, double [[TMP7]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT10:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT9]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP8:%.*]] = fmul <1 x double> [[SPLAT_SPLAT10]], [[BLOCK8]]
+; RM-NEXT:    [[BLOCK11:%.*]] = shufflevector <3 x double> [[SPLIT4]], <3 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP9:%.*]] = extractelement <2 x double> [[SPLIT]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT12:%.*]] = insertelement <1 x double> undef, double [[TMP9]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT13:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT12]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP10:%.*]] = fmul <1 x double> [[SPLAT_SPLAT13]], [[BLOCK11]]
+; RM-NEXT:    [[TMP11:%.*]] = fadd <1 x double> [[TMP8]], [[TMP10]]
+; RM-NEXT:    [[TMP12:%.*]] = shufflevector <1 x double> [[TMP11]], <1 x double> undef, <3 x i32> <i32 0, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP13:%.*]] = shufflevector <3 x double> [[TMP6]], <3 x double> [[TMP12]], <3 x i32> <i32 0, i32 3, i32 2>
+; RM-NEXT:    [[BLOCK14:%.*]] = shufflevector <3 x double> [[SPLIT3]], <3 x double> undef, <1 x i32> <i32 2>
+; RM-NEXT:    [[TMP14:%.*]] = extractelement <2 x double> [[SPLIT]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT15:%.*]] = insertelement <1 x double> undef, double [[TMP14]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT16:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT15]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP15:%.*]] = fmul <1 x double> [[SPLAT_SPLAT16]], [[BLOCK14]]
+; RM-NEXT:    [[BLOCK17:%.*]] = shufflevector <3 x double> [[SPLIT4]], <3 x double> undef, <1 x i32> <i32 2>
+; RM-NEXT:    [[TMP16:%.*]] = extractelement <2 x double> [[SPLIT]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT18:%.*]] = insertelement <1 x double> undef, double [[TMP16]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT19:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT18]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP17:%.*]] = fmul <1 x double> [[SPLAT_SPLAT19]], [[BLOCK17]]
+; RM-NEXT:    [[TMP18:%.*]] = fadd <1 x double> [[TMP15]], [[TMP17]]
+; RM-NEXT:    [[TMP19:%.*]] = shufflevector <1 x double> [[TMP18]], <1 x double> undef, <3 x i32> <i32 0, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP20:%.*]] = shufflevector <3 x double> [[TMP13]], <3 x double> [[TMP19]], <3 x i32> <i32 0, i32 1, i32 3>
+; RM-NEXT:    [[BLOCK20:%.*]] = shufflevector <3 x double> [[SPLIT3]], <3 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP21:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT21:%.*]] = insertelement <1 x double> undef, double [[TMP21]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT22:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT21]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP22:%.*]] = fmul <1 x double> [[SPLAT_SPLAT22]], [[BLOCK20]]
+; RM-NEXT:    [[BLOCK23:%.*]] = shufflevector <3 x double> [[SPLIT4]], <3 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP23:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT24:%.*]] = insertelement <1 x double> undef, double [[TMP23]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT25:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT24]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP24:%.*]] = fmul <1 x double> [[SPLAT_SPLAT25]], [[BLOCK23]]
+; RM-NEXT:    [[TMP25:%.*]] = fadd <1 x double> [[TMP22]], [[TMP24]]
+; RM-NEXT:    [[TMP26:%.*]] = shufflevector <1 x double> [[TMP25]], <1 x double> undef, <3 x i32> <i32 0, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP27:%.*]] = shufflevector <3 x double> undef, <3 x double> [[TMP26]], <3 x i32> <i32 3, i32 1, i32 2>
+; RM-NEXT:    [[BLOCK26:%.*]] = shufflevector <3 x double> [[SPLIT3]], <3 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP28:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT27:%.*]] = insertelement <1 x double> undef, double [[TMP28]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT28:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT27]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP29:%.*]] = fmul <1 x double> [[SPLAT_SPLAT28]], [[BLOCK26]]
+; RM-NEXT:    [[BLOCK29:%.*]] = shufflevector <3 x double> [[SPLIT4]], <3 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP30:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT30:%.*]] = insertelement <1 x double> undef, double [[TMP30]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT31:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT30]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP31:%.*]] = fmul <1 x double> [[SPLAT_SPLAT31]], [[BLOCK29]]
+; RM-NEXT:    [[TMP32:%.*]] = fadd <1 x double> [[TMP29]], [[TMP31]]
+; RM-NEXT:    [[TMP33:%.*]] = shufflevector <1 x double> [[TMP32]], <1 x double> undef, <3 x i32> <i32 0, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP34:%.*]] = shufflevector <3 x double> [[TMP27]], <3 x double> [[TMP33]], <3 x i32> <i32 0, i32 3, i32 2>
+; RM-NEXT:    [[BLOCK32:%.*]] = shufflevector <3 x double> [[SPLIT3]], <3 x double> undef, <1 x i32> <i32 2>
+; RM-NEXT:    [[TMP35:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT33:%.*]] = insertelement <1 x double> undef, double [[TMP35]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT34:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT33]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP36:%.*]] = fmul <1 x double> [[SPLAT_SPLAT34]], [[BLOCK32]]
+; RM-NEXT:    [[BLOCK35:%.*]] = shufflevector <3 x double> [[SPLIT4]], <3 x double> undef, <1 x i32> <i32 2>
+; RM-NEXT:    [[TMP37:%.*]] = extractelement <2 x double> [[SPLIT1]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT36:%.*]] = insertelement <1 x double> undef, double [[TMP37]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT37:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT36]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP38:%.*]] = fmul <1 x double> [[SPLAT_SPLAT37]], [[BLOCK35]]
+; RM-NEXT:    [[TMP39:%.*]] = fadd <1 x double> [[TMP36]], [[TMP38]]
+; RM-NEXT:    [[TMP40:%.*]] = shufflevector <1 x double> [[TMP39]], <1 x double> undef, <3 x i32> <i32 0, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP41:%.*]] = shufflevector <3 x double> [[TMP34]], <3 x double> [[TMP40]], <3 x i32> <i32 0, i32 1, i32 3>
+; RM-NEXT:    [[BLOCK38:%.*]] = shufflevector <3 x double> [[SPLIT3]], <3 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP42:%.*]] = extractelement <2 x double> [[SPLIT2]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT39:%.*]] = insertelement <1 x double> undef, double [[TMP42]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT40:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT39]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP43:%.*]] = fmul <1 x double> [[SPLAT_SPLAT40]], [[BLOCK38]]
+; RM-NEXT:    [[BLOCK41:%.*]] = shufflevector <3 x double> [[SPLIT4]], <3 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP44:%.*]] = extractelement <2 x double> [[SPLIT2]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT42:%.*]] = insertelement <1 x double> undef, double [[TMP44]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT43:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT42]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP45:%.*]] = fmul <1 x double> [[SPLAT_SPLAT43]], [[BLOCK41]]
+; RM-NEXT:    [[TMP46:%.*]] = fadd <1 x double> [[TMP43]], [[TMP45]]
+; RM-NEXT:    [[TMP47:%.*]] = shufflevector <1 x double> [[TMP46]], <1 x double> undef, <3 x i32> <i32 0, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP48:%.*]] = shufflevector <3 x double> undef, <3 x double> [[TMP47]], <3 x i32> <i32 3, i32 1, i32 2>
+; RM-NEXT:    [[BLOCK44:%.*]] = shufflevector <3 x double> [[SPLIT3]], <3 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP49:%.*]] = extractelement <2 x double> [[SPLIT2]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT45:%.*]] = insertelement <1 x double> undef, double [[TMP49]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT46:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT45]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP50:%.*]] = fmul <1 x double> [[SPLAT_SPLAT46]], [[BLOCK44]]
+; RM-NEXT:    [[BLOCK47:%.*]] = shufflevector <3 x double> [[SPLIT4]], <3 x double> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP51:%.*]] = extractelement <2 x double> [[SPLIT2]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT48:%.*]] = insertelement <1 x double> undef, double [[TMP51]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT49:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT48]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP52:%.*]] = fmul <1 x double> [[SPLAT_SPLAT49]], [[BLOCK47]]
+; RM-NEXT:    [[TMP53:%.*]] = fadd <1 x double> [[TMP50]], [[TMP52]]
+; RM-NEXT:    [[TMP54:%.*]] = shufflevector <1 x double> [[TMP53]], <1 x double> undef, <3 x i32> <i32 0, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP55:%.*]] = shufflevector <3 x double> [[TMP48]], <3 x double> [[TMP54]], <3 x i32> <i32 0, i32 3, i32 2>
+; RM-NEXT:    [[BLOCK50:%.*]] = shufflevector <3 x double> [[SPLIT3]], <3 x double> undef, <1 x i32> <i32 2>
+; RM-NEXT:    [[TMP56:%.*]] = extractelement <2 x double> [[SPLIT2]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT51:%.*]] = insertelement <1 x double> undef, double [[TMP56]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT52:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT51]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP57:%.*]] = fmul <1 x double> [[SPLAT_SPLAT52]], [[BLOCK50]]
+; RM-NEXT:    [[BLOCK53:%.*]] = shufflevector <3 x double> [[SPLIT4]], <3 x double> undef, <1 x i32> <i32 2>
+; RM-NEXT:    [[TMP58:%.*]] = extractelement <2 x double> [[SPLIT2]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT54:%.*]] = insertelement <1 x double> undef, double [[TMP58]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT55:%.*]] = shufflevector <1 x double> [[SPLAT_SPLATINSERT54]], <1 x double> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP59:%.*]] = fmul <1 x double> [[SPLAT_SPLAT55]], [[BLOCK53]]
+; RM-NEXT:    [[TMP60:%.*]] = fadd <1 x double> [[TMP57]], [[TMP59]]
+; RM-NEXT:    [[TMP61:%.*]] = shufflevector <1 x double> [[TMP60]], <1 x double> undef, <3 x i32> <i32 0, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP62:%.*]] = shufflevector <3 x double> [[TMP55]], <3 x double> [[TMP61]], <3 x i32> <i32 0, i32 1, i32 3>
+; RM-NEXT:    [[TMP63:%.*]] = shufflevector <3 x double> [[TMP20]], <3 x double> [[TMP41]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
+; RM-NEXT:    [[TMP64:%.*]] = shufflevector <3 x double> [[TMP62]], <3 x double> undef, <6 x i32> <i32 0, i32 1, i32 2, i32 undef, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP65:%.*]] = shufflevector <6 x double> [[TMP63]], <6 x double> [[TMP64]], <9 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
+; RM-NEXT:    ret <9 x double> [[TMP65]]
+;
+entry:
+  %c = call <9 x double> @llvm.matrix.multiply.v6f64.v6f64.v6f64(<6 x double> %a, <6 x double> %b, i32 3, i32 2, i32 3)
+  ret <9 x double> %c
+}
+
+declare <9 x double> @llvm.matrix.multiply.v6f64.v6f64.v6f64(<6 x double>, <6 x double>, i32, i32, i32)
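
The RM check lines above lower the row-major multiplies row by row: for each result row i and result column j, an element A[i][k] of row i of the first operand is splatted and multiplied with the matching piece of row k of the second operand, and the partial products are accumulated (fmul/fadd for the double tests here, mul/add in the i32 variant further below). A minimal C sketch of that decomposition for the 2x2 case, using ordinary row-major arrays purely as an illustration (this is not code from the patch):

#include <stdio.h>

int main(void) {
  /* Row-major 2x2 operands; C[i][j] = sum over k of A[i][k] * B[k][j],
     which is the sum the splat/fmul/fadd chains above compute per block. */
  double A[2][2] = {{1, 2}, {3, 4}};
  double B[2][2] = {{5, 6}, {7, 8}};
  double C[2][2] = {{0, 0}, {0, 0}};

  for (int i = 0; i < 2; ++i)       /* result row, built from row i of %a (SPLIT/SPLIT1) */
    for (int j = 0; j < 2; ++j)     /* column block of the result row */
      for (int k = 0; k < 2; ++k)   /* splat A[i][k], multiply element j of row k of %b */
        C[i][j] += A[i][k] * B[k][j];

  printf("%g %g\n%g %g\n", C[0][0], C[0][1], C[1][0], C[1][1]);
  return 0;
}

Each fully computed result row is then stitched back into the flat <4 x double> return value by the final shufflevectors.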

diff  --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-multiple-blocks.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-multiple-blocks.ll
index c64d70a105ef..f7cb89d64b9f 100644
--- a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-multiple-blocks.ll
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-fused-multiple-blocks.ll
@@ -164,34 +164,34 @@ define void @test(<6 x double> * %A, <6 x double> * %B, <9 x double>* %C, i1 %co
 ; CHECK-NEXT:    [[LD_B97:%.*]] = ptrtoint <6 x double>* [[A]] to i64
 ; CHECK-NEXT:    [[TMP36:%.*]] = icmp ugt i64 [[ST_E93]], [[LD_B97]]
 ; CHECK-NEXT:    br i1 [[TMP36]], label [[ALIAS_CONT94:%.*]], label [[NO_ALIAS96:%.*]]
-; CHECK:       alias_cont92:
+; CHECK:       alias_cont91:
 ; CHECK-NEXT:    [[LD_E98:%.*]] = add nuw nsw i64 [[LD_B97]], 48
 ; CHECK-NEXT:    [[TMP37:%.*]] = icmp ugt i64 [[LD_E98]], [[ST_B92]]
 ; CHECK-NEXT:    br i1 [[TMP37]], label [[COPY95:%.*]], label [[NO_ALIAS96]]
-; CHECK:       copy93:
+; CHECK:       copy92:
 ; CHECK-NEXT:    [[TMP38:%.*]] = alloca <6 x double>, align 64
 ; CHECK-NEXT:    [[TMP39:%.*]] = bitcast <6 x double>* [[TMP38]] to i8*
 ; CHECK-NEXT:    [[TMP40:%.*]] = bitcast <6 x double>* [[A]] to i8*
 ; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 64 dereferenceable(48) [[TMP39]], i8* nonnull align 16 dereferenceable(48) [[TMP40]], i64 48, i1 false)
 ; CHECK-NEXT:    br label [[NO_ALIAS96]]
-; CHECK:       no_alias94:
+; CHECK:       no_alias93:
 ; CHECK-NEXT:    [[TMP41:%.*]] = phi <6 x double>* [ [[A]], [[END]] ], [ [[A]], [[ALIAS_CONT94]] ], [ [[TMP38]], [[COPY95]] ]
 ; CHECK-NEXT:    [[ST_B99:%.*]] = ptrtoint <9 x double>* [[C]] to i64
 ; CHECK-NEXT:    [[ST_E100:%.*]] = add nuw nsw i64 [[ST_B99]], 72
 ; CHECK-NEXT:    [[LD_B104:%.*]] = ptrtoint <6 x double>* [[B]] to i64
 ; CHECK-NEXT:    [[TMP42:%.*]] = icmp ugt i64 [[ST_E100]], [[LD_B104]]
 ; CHECK-NEXT:    br i1 [[TMP42]], label [[ALIAS_CONT101:%.*]], label [[NO_ALIAS103:%.*]]
-; CHECK:       alias_cont99:
+; CHECK:       alias_cont98:
 ; CHECK-NEXT:    [[LD_E105:%.*]] = add nuw nsw i64 [[LD_B104]], 48
 ; CHECK-NEXT:    [[TMP43:%.*]] = icmp ugt i64 [[LD_E105]], [[ST_B99]]
 ; CHECK-NEXT:    br i1 [[TMP43]], label [[COPY102:%.*]], label [[NO_ALIAS103]]
-; CHECK:       copy100:
+; CHECK:       copy99:
 ; CHECK-NEXT:    [[TMP44:%.*]] = alloca <6 x double>, align 64
 ; CHECK-NEXT:    [[TMP45:%.*]] = bitcast <6 x double>* [[TMP44]] to i8*
 ; CHECK-NEXT:    [[TMP46:%.*]] = bitcast <6 x double>* [[B]] to i8*
 ; CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 64 dereferenceable(48) [[TMP45]], i8* nonnull align 16 dereferenceable(48) [[TMP46]], i64 48, i1 false)
 ; CHECK-NEXT:    br label [[NO_ALIAS103]]
-; CHECK:       no_alias101:
+; CHECK:       no_alias100:
 ; CHECK-NEXT:    [[TMP47:%.*]] = phi <6 x double>* [ [[B]], [[NO_ALIAS96]] ], [ [[B]], [[ALIAS_CONT101]] ], [ [[TMP44]], [[COPY102]] ]
 ; CHECK-NEXT:    [[COL_CAST107:%.*]] = bitcast <6 x double>* [[TMP41]] to <2 x double>*
 ; CHECK-NEXT:    [[COL_LOAD108:%.*]] = load <2 x double>, <2 x double>* [[COL_CAST107]], align 8
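
The renumbered labels in this hunk all belong to the runtime alias checks that the fused-multiply path emits: the store range of the result is compared against the load range of each operand, and an operand that may overlap the destination is first copied into a stack temporary with memcpy (the alloca/memcpy in the copy blocks above). A rough C sketch of that check, under assumed names (maybe_copy and the malloc are stand-ins for the generated compare/alloca/memcpy sequence, not code from the pass):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Return the pointer the fused multiply should load from: the original
   operand, or a private copy when the store range [st_b, st_e) may
   overlap the load range [ld_b, ld_b + ld_size) -- the same condition
   the alias_cont/copy/no_alias blocks above test. */
static const void *maybe_copy(const void *ld, size_t ld_size,
                              uintptr_t st_b, uintptr_t st_e) {
  uintptr_t ld_b = (uintptr_t)ld;
  uintptr_t ld_e = ld_b + ld_size;
  if (st_e > ld_b && ld_e > st_b) { /* ranges overlap */
    void *copy = malloc(ld_size);   /* stands in for the alloca */
    if (copy)
      memcpy(copy, ld, ld_size);
    return copy;
  }
  return ld;                        /* no aliasing: read the operand in place */
}

int main(void) {
  double B[6] = {1, 2, 3, 4, 5, 6};
  /* A result store range that overlaps B forces a defensive copy. */
  uintptr_t st_b = (uintptr_t)&B[3];
  const void *src = maybe_copy(B, sizeof B, st_b, st_b + 9 * sizeof(double));
  if (src != (const void *)B)
    free((void *)src);
  return 0;
}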

diff  --git a/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-i32-row-major.ll b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-i32-row-major.ll
new file mode 100644
index 000000000000..11db4ddfb066
--- /dev/null
+++ b/llvm/test/Transforms/LowerMatrixIntrinsics/multiply-i32-row-major.ll
@@ -0,0 +1,256 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --verbose
+
+; RUN: opt -lower-matrix-intrinsics -matrix-default-layout=row-major -S < %s | FileCheck --check-prefix=RM %s
+
+
+
+define <4 x i32> @multiply_2x2(<4 x i32> %a, <4 x i32> %b) {
+; RM-LABEL: @multiply_2x2(
+; RM-NEXT:  entry:
+; RM-NEXT:    [[SPLIT:%.*]] = shufflevector <4 x i32> [[A:%.*]], <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+; RM-NEXT:    [[SPLIT1:%.*]] = shufflevector <4 x i32> [[A]], <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+; RM-NEXT:    [[SPLIT2:%.*]] = shufflevector <4 x i32> [[B:%.*]], <4 x i32> undef, <2 x i32> <i32 0, i32 1>
+; RM-NEXT:    [[SPLIT3:%.*]] = shufflevector <4 x i32> [[B]], <4 x i32> undef, <2 x i32> <i32 2, i32 3>
+; RM-NEXT:    [[BLOCK:%.*]] = shufflevector <2 x i32> [[SPLIT2]], <2 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP0:%.*]] = extractelement <2 x i32> [[SPLIT]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <1 x i32> undef, i32 [[TMP0]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP1:%.*]] = mul <1 x i32> [[SPLAT_SPLAT]], [[BLOCK]]
+; RM-NEXT:    [[BLOCK4:%.*]] = shufflevector <2 x i32> [[SPLIT3]], <2 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP2:%.*]] = extractelement <2 x i32> [[SPLIT]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT5:%.*]] = insertelement <1 x i32> undef, i32 [[TMP2]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT6:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT5]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP3:%.*]] = mul <1 x i32> [[SPLAT_SPLAT6]], [[BLOCK4]]
+; RM-NEXT:    [[TMP4:%.*]] = add <1 x i32> [[TMP1]], [[TMP3]]
+; RM-NEXT:    [[TMP5:%.*]] = shufflevector <1 x i32> [[TMP4]], <1 x i32> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP6:%.*]] = shufflevector <2 x i32> undef, <2 x i32> [[TMP5]], <2 x i32> <i32 2, i32 1>
+; RM-NEXT:    [[BLOCK7:%.*]] = shufflevector <2 x i32> [[SPLIT2]], <2 x i32> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP7:%.*]] = extractelement <2 x i32> [[SPLIT]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT8:%.*]] = insertelement <1 x i32> undef, i32 [[TMP7]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT9:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT8]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP8:%.*]] = mul <1 x i32> [[SPLAT_SPLAT9]], [[BLOCK7]]
+; RM-NEXT:    [[BLOCK10:%.*]] = shufflevector <2 x i32> [[SPLIT3]], <2 x i32> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP9:%.*]] = extractelement <2 x i32> [[SPLIT]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT11:%.*]] = insertelement <1 x i32> undef, i32 [[TMP9]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT12:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT11]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP10:%.*]] = mul <1 x i32> [[SPLAT_SPLAT12]], [[BLOCK10]]
+; RM-NEXT:    [[TMP11:%.*]] = add <1 x i32> [[TMP8]], [[TMP10]]
+; RM-NEXT:    [[TMP12:%.*]] = shufflevector <1 x i32> [[TMP11]], <1 x i32> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP13:%.*]] = shufflevector <2 x i32> [[TMP6]], <2 x i32> [[TMP12]], <2 x i32> <i32 0, i32 2>
+; RM-NEXT:    [[BLOCK13:%.*]] = shufflevector <2 x i32> [[SPLIT2]], <2 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP14:%.*]] = extractelement <2 x i32> [[SPLIT1]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT14:%.*]] = insertelement <1 x i32> undef, i32 [[TMP14]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT15:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT14]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP15:%.*]] = mul <1 x i32> [[SPLAT_SPLAT15]], [[BLOCK13]]
+; RM-NEXT:    [[BLOCK16:%.*]] = shufflevector <2 x i32> [[SPLIT3]], <2 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP16:%.*]] = extractelement <2 x i32> [[SPLIT1]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT17:%.*]] = insertelement <1 x i32> undef, i32 [[TMP16]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT18:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT17]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP17:%.*]] = mul <1 x i32> [[SPLAT_SPLAT18]], [[BLOCK16]]
+; RM-NEXT:    [[TMP18:%.*]] = add <1 x i32> [[TMP15]], [[TMP17]]
+; RM-NEXT:    [[TMP19:%.*]] = shufflevector <1 x i32> [[TMP18]], <1 x i32> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP20:%.*]] = shufflevector <2 x i32> undef, <2 x i32> [[TMP19]], <2 x i32> <i32 2, i32 1>
+; RM-NEXT:    [[BLOCK19:%.*]] = shufflevector <2 x i32> [[SPLIT2]], <2 x i32> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP21:%.*]] = extractelement <2 x i32> [[SPLIT1]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT20:%.*]] = insertelement <1 x i32> undef, i32 [[TMP21]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT21:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT20]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP22:%.*]] = mul <1 x i32> [[SPLAT_SPLAT21]], [[BLOCK19]]
+; RM-NEXT:    [[BLOCK22:%.*]] = shufflevector <2 x i32> [[SPLIT3]], <2 x i32> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP23:%.*]] = extractelement <2 x i32> [[SPLIT1]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT23:%.*]] = insertelement <1 x i32> undef, i32 [[TMP23]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT24:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT23]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP24:%.*]] = mul <1 x i32> [[SPLAT_SPLAT24]], [[BLOCK22]]
+; RM-NEXT:    [[TMP25:%.*]] = add <1 x i32> [[TMP22]], [[TMP24]]
+; RM-NEXT:    [[TMP26:%.*]] = shufflevector <1 x i32> [[TMP25]], <1 x i32> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP27:%.*]] = shufflevector <2 x i32> [[TMP20]], <2 x i32> [[TMP26]], <2 x i32> <i32 0, i32 2>
+; RM-NEXT:    [[TMP28:%.*]] = shufflevector <2 x i32> [[TMP13]], <2 x i32> [[TMP27]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; RM-NEXT:    ret <4 x i32> [[TMP28]]
+;
+entry:
+  %c = call <4 x i32> @llvm.matrix.multiply.v4f64.v4f64.v4f64(<4 x i32> %a, <4 x i32> %b, i32 2, i32 2, i32 2)
+  ret <4 x i32> %c
+}
+
+declare <4 x i32> @llvm.matrix.multiply.v4f64.v4f64.v4f64(<4 x i32>, <4 x i32>, i32, i32, i32)
+
+define <4 x i32> @multiply_1x2(<2 x i32> %a, <2 x i32> %b) {
+
+; RM-LABEL: @multiply_1x2(
+; RM-NEXT:  entry:
+; RM-NEXT:    [[SPLIT:%.*]] = shufflevector <2 x i32> [[A:%.*]], <2 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[SPLIT1:%.*]] = shufflevector <2 x i32> [[A]], <2 x i32> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[SPLIT2:%.*]] = shufflevector <2 x i32> [[B:%.*]], <2 x i32> undef, <2 x i32> <i32 0, i32 1>
+; RM-NEXT:    [[BLOCK:%.*]] = shufflevector <2 x i32> [[SPLIT2]], <2 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP0:%.*]] = extractelement <1 x i32> [[SPLIT]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <1 x i32> undef, i32 [[TMP0]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP1:%.*]] = mul <1 x i32> [[SPLAT_SPLAT]], [[BLOCK]]
+; RM-NEXT:    [[TMP2:%.*]] = shufflevector <1 x i32> [[TMP1]], <1 x i32> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP3:%.*]] = shufflevector <2 x i32> undef, <2 x i32> [[TMP2]], <2 x i32> <i32 2, i32 1>
+; RM-NEXT:    [[BLOCK3:%.*]] = shufflevector <2 x i32> [[SPLIT2]], <2 x i32> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP4:%.*]] = extractelement <1 x i32> [[SPLIT]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT4:%.*]] = insertelement <1 x i32> undef, i32 [[TMP4]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT5:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT4]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP5:%.*]] = mul <1 x i32> [[SPLAT_SPLAT5]], [[BLOCK3]]
+; RM-NEXT:    [[TMP6:%.*]] = shufflevector <1 x i32> [[TMP5]], <1 x i32> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP7:%.*]] = shufflevector <2 x i32> [[TMP3]], <2 x i32> [[TMP6]], <2 x i32> <i32 0, i32 2>
+; RM-NEXT:    [[BLOCK6:%.*]] = shufflevector <2 x i32> [[SPLIT2]], <2 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP8:%.*]] = extractelement <1 x i32> [[SPLIT1]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT7:%.*]] = insertelement <1 x i32> undef, i32 [[TMP8]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT8:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT7]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP9:%.*]] = mul <1 x i32> [[SPLAT_SPLAT8]], [[BLOCK6]]
+; RM-NEXT:    [[TMP10:%.*]] = shufflevector <1 x i32> [[TMP9]], <1 x i32> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP11:%.*]] = shufflevector <2 x i32> undef, <2 x i32> [[TMP10]], <2 x i32> <i32 2, i32 1>
+; RM-NEXT:    [[BLOCK9:%.*]] = shufflevector <2 x i32> [[SPLIT2]], <2 x i32> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP12:%.*]] = extractelement <1 x i32> [[SPLIT1]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT10:%.*]] = insertelement <1 x i32> undef, i32 [[TMP12]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT11:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT10]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP13:%.*]] = mul <1 x i32> [[SPLAT_SPLAT11]], [[BLOCK9]]
+; RM-NEXT:    [[TMP14:%.*]] = shufflevector <1 x i32> [[TMP13]], <1 x i32> undef, <2 x i32> <i32 0, i32 undef>
+; RM-NEXT:    [[TMP15:%.*]] = shufflevector <2 x i32> [[TMP11]], <2 x i32> [[TMP14]], <2 x i32> <i32 0, i32 2>
+; RM-NEXT:    [[TMP16:%.*]] = shufflevector <2 x i32> [[TMP7]], <2 x i32> [[TMP15]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; RM-NEXT:    ret <4 x i32> [[TMP16]]
+;
+entry:
+  %c = call <4 x i32> @llvm.matrix.multiply.v4f64.v2f64.v2f64(<2 x i32> %a, <2 x i32> %b, i32 2, i32 1, i32 2)
+  ret <4 x i32> %c
+}
+
+declare <4 x i32> @llvm.matrix.multiply.v4f64.v2f64.v2f64(<2 x i32>, <2 x i32>, i32, i32, i32)
+
+define <9 x i32> @multiply_2x3(<6 x i32> %a, <6 x i32> %b) {
+; RM-LABEL: @multiply_2x3(
+; RM-NEXT:  entry:
+; RM-NEXT:    [[SPLIT:%.*]] = shufflevector <6 x i32> [[A:%.*]], <6 x i32> undef, <2 x i32> <i32 0, i32 1>
+; RM-NEXT:    [[SPLIT1:%.*]] = shufflevector <6 x i32> [[A]], <6 x i32> undef, <2 x i32> <i32 2, i32 3>
+; RM-NEXT:    [[SPLIT2:%.*]] = shufflevector <6 x i32> [[A]], <6 x i32> undef, <2 x i32> <i32 4, i32 5>
+; RM-NEXT:    [[SPLIT3:%.*]] = shufflevector <6 x i32> [[B:%.*]], <6 x i32> undef, <3 x i32> <i32 0, i32 1, i32 2>
+; RM-NEXT:    [[SPLIT4:%.*]] = shufflevector <6 x i32> [[B]], <6 x i32> undef, <3 x i32> <i32 3, i32 4, i32 5>
+; RM-NEXT:    [[BLOCK:%.*]] = shufflevector <3 x i32> [[SPLIT3]], <3 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP0:%.*]] = extractelement <2 x i32> [[SPLIT]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <1 x i32> undef, i32 [[TMP0]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP1:%.*]] = mul <1 x i32> [[SPLAT_SPLAT]], [[BLOCK]]
+; RM-NEXT:    [[BLOCK5:%.*]] = shufflevector <3 x i32> [[SPLIT4]], <3 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP2:%.*]] = extractelement <2 x i32> [[SPLIT]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT6:%.*]] = insertelement <1 x i32> undef, i32 [[TMP2]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT7:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT6]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP3:%.*]] = mul <1 x i32> [[SPLAT_SPLAT7]], [[BLOCK5]]
+; RM-NEXT:    [[TMP4:%.*]] = add <1 x i32> [[TMP1]], [[TMP3]]
+; RM-NEXT:    [[TMP5:%.*]] = shufflevector <1 x i32> [[TMP4]], <1 x i32> undef, <3 x i32> <i32 0, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP6:%.*]] = shufflevector <3 x i32> undef, <3 x i32> [[TMP5]], <3 x i32> <i32 3, i32 1, i32 2>
+; RM-NEXT:    [[BLOCK8:%.*]] = shufflevector <3 x i32> [[SPLIT3]], <3 x i32> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP7:%.*]] = extractelement <2 x i32> [[SPLIT]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT9:%.*]] = insertelement <1 x i32> undef, i32 [[TMP7]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT10:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT9]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP8:%.*]] = mul <1 x i32> [[SPLAT_SPLAT10]], [[BLOCK8]]
+; RM-NEXT:    [[BLOCK11:%.*]] = shufflevector <3 x i32> [[SPLIT4]], <3 x i32> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP9:%.*]] = extractelement <2 x i32> [[SPLIT]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT12:%.*]] = insertelement <1 x i32> undef, i32 [[TMP9]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT13:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT12]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP10:%.*]] = mul <1 x i32> [[SPLAT_SPLAT13]], [[BLOCK11]]
+; RM-NEXT:    [[TMP11:%.*]] = add <1 x i32> [[TMP8]], [[TMP10]]
+; RM-NEXT:    [[TMP12:%.*]] = shufflevector <1 x i32> [[TMP11]], <1 x i32> undef, <3 x i32> <i32 0, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP13:%.*]] = shufflevector <3 x i32> [[TMP6]], <3 x i32> [[TMP12]], <3 x i32> <i32 0, i32 3, i32 2>
+; RM-NEXT:    [[BLOCK14:%.*]] = shufflevector <3 x i32> [[SPLIT3]], <3 x i32> undef, <1 x i32> <i32 2>
+; RM-NEXT:    [[TMP14:%.*]] = extractelement <2 x i32> [[SPLIT]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT15:%.*]] = insertelement <1 x i32> undef, i32 [[TMP14]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT16:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT15]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP15:%.*]] = mul <1 x i32> [[SPLAT_SPLAT16]], [[BLOCK14]]
+; RM-NEXT:    [[BLOCK17:%.*]] = shufflevector <3 x i32> [[SPLIT4]], <3 x i32> undef, <1 x i32> <i32 2>
+; RM-NEXT:    [[TMP16:%.*]] = extractelement <2 x i32> [[SPLIT]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT18:%.*]] = insertelement <1 x i32> undef, i32 [[TMP16]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT19:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT18]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP17:%.*]] = mul <1 x i32> [[SPLAT_SPLAT19]], [[BLOCK17]]
+; RM-NEXT:    [[TMP18:%.*]] = add <1 x i32> [[TMP15]], [[TMP17]]
+; RM-NEXT:    [[TMP19:%.*]] = shufflevector <1 x i32> [[TMP18]], <1 x i32> undef, <3 x i32> <i32 0, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP20:%.*]] = shufflevector <3 x i32> [[TMP13]], <3 x i32> [[TMP19]], <3 x i32> <i32 0, i32 1, i32 3>
+; RM-NEXT:    [[BLOCK20:%.*]] = shufflevector <3 x i32> [[SPLIT3]], <3 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP21:%.*]] = extractelement <2 x i32> [[SPLIT1]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT21:%.*]] = insertelement <1 x i32> undef, i32 [[TMP21]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT22:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT21]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP22:%.*]] = mul <1 x i32> [[SPLAT_SPLAT22]], [[BLOCK20]]
+; RM-NEXT:    [[BLOCK23:%.*]] = shufflevector <3 x i32> [[SPLIT4]], <3 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP23:%.*]] = extractelement <2 x i32> [[SPLIT1]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT24:%.*]] = insertelement <1 x i32> undef, i32 [[TMP23]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT25:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT24]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP24:%.*]] = mul <1 x i32> [[SPLAT_SPLAT25]], [[BLOCK23]]
+; RM-NEXT:    [[TMP25:%.*]] = add <1 x i32> [[TMP22]], [[TMP24]]
+; RM-NEXT:    [[TMP26:%.*]] = shufflevector <1 x i32> [[TMP25]], <1 x i32> undef, <3 x i32> <i32 0, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP27:%.*]] = shufflevector <3 x i32> undef, <3 x i32> [[TMP26]], <3 x i32> <i32 3, i32 1, i32 2>
+; RM-NEXT:    [[BLOCK26:%.*]] = shufflevector <3 x i32> [[SPLIT3]], <3 x i32> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP28:%.*]] = extractelement <2 x i32> [[SPLIT1]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT27:%.*]] = insertelement <1 x i32> undef, i32 [[TMP28]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT28:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT27]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP29:%.*]] = mul <1 x i32> [[SPLAT_SPLAT28]], [[BLOCK26]]
+; RM-NEXT:    [[BLOCK29:%.*]] = shufflevector <3 x i32> [[SPLIT4]], <3 x i32> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP30:%.*]] = extractelement <2 x i32> [[SPLIT1]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT30:%.*]] = insertelement <1 x i32> undef, i32 [[TMP30]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT31:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT30]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP31:%.*]] = mul <1 x i32> [[SPLAT_SPLAT31]], [[BLOCK29]]
+; RM-NEXT:    [[TMP32:%.*]] = add <1 x i32> [[TMP29]], [[TMP31]]
+; RM-NEXT:    [[TMP33:%.*]] = shufflevector <1 x i32> [[TMP32]], <1 x i32> undef, <3 x i32> <i32 0, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP34:%.*]] = shufflevector <3 x i32> [[TMP27]], <3 x i32> [[TMP33]], <3 x i32> <i32 0, i32 3, i32 2>
+; RM-NEXT:    [[BLOCK32:%.*]] = shufflevector <3 x i32> [[SPLIT3]], <3 x i32> undef, <1 x i32> <i32 2>
+; RM-NEXT:    [[TMP35:%.*]] = extractelement <2 x i32> [[SPLIT1]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT33:%.*]] = insertelement <1 x i32> undef, i32 [[TMP35]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT34:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT33]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP36:%.*]] = mul <1 x i32> [[SPLAT_SPLAT34]], [[BLOCK32]]
+; RM-NEXT:    [[BLOCK35:%.*]] = shufflevector <3 x i32> [[SPLIT4]], <3 x i32> undef, <1 x i32> <i32 2>
+; RM-NEXT:    [[TMP37:%.*]] = extractelement <2 x i32> [[SPLIT1]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT36:%.*]] = insertelement <1 x i32> undef, i32 [[TMP37]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT37:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT36]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP38:%.*]] = mul <1 x i32> [[SPLAT_SPLAT37]], [[BLOCK35]]
+; RM-NEXT:    [[TMP39:%.*]] = add <1 x i32> [[TMP36]], [[TMP38]]
+; RM-NEXT:    [[TMP40:%.*]] = shufflevector <1 x i32> [[TMP39]], <1 x i32> undef, <3 x i32> <i32 0, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP41:%.*]] = shufflevector <3 x i32> [[TMP34]], <3 x i32> [[TMP40]], <3 x i32> <i32 0, i32 1, i32 3>
+; RM-NEXT:    [[BLOCK38:%.*]] = shufflevector <3 x i32> [[SPLIT3]], <3 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP42:%.*]] = extractelement <2 x i32> [[SPLIT2]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT39:%.*]] = insertelement <1 x i32> undef, i32 [[TMP42]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT40:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT39]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP43:%.*]] = mul <1 x i32> [[SPLAT_SPLAT40]], [[BLOCK38]]
+; RM-NEXT:    [[BLOCK41:%.*]] = shufflevector <3 x i32> [[SPLIT4]], <3 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP44:%.*]] = extractelement <2 x i32> [[SPLIT2]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT42:%.*]] = insertelement <1 x i32> undef, i32 [[TMP44]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT43:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT42]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP45:%.*]] = mul <1 x i32> [[SPLAT_SPLAT43]], [[BLOCK41]]
+; RM-NEXT:    [[TMP46:%.*]] = add <1 x i32> [[TMP43]], [[TMP45]]
+; RM-NEXT:    [[TMP47:%.*]] = shufflevector <1 x i32> [[TMP46]], <1 x i32> undef, <3 x i32> <i32 0, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP48:%.*]] = shufflevector <3 x i32> undef, <3 x i32> [[TMP47]], <3 x i32> <i32 3, i32 1, i32 2>
+; RM-NEXT:    [[BLOCK44:%.*]] = shufflevector <3 x i32> [[SPLIT3]], <3 x i32> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP49:%.*]] = extractelement <2 x i32> [[SPLIT2]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT45:%.*]] = insertelement <1 x i32> undef, i32 [[TMP49]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT46:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT45]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP50:%.*]] = mul <1 x i32> [[SPLAT_SPLAT46]], [[BLOCK44]]
+; RM-NEXT:    [[BLOCK47:%.*]] = shufflevector <3 x i32> [[SPLIT4]], <3 x i32> undef, <1 x i32> <i32 1>
+; RM-NEXT:    [[TMP51:%.*]] = extractelement <2 x i32> [[SPLIT2]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT48:%.*]] = insertelement <1 x i32> undef, i32 [[TMP51]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT49:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT48]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP52:%.*]] = mul <1 x i32> [[SPLAT_SPLAT49]], [[BLOCK47]]
+; RM-NEXT:    [[TMP53:%.*]] = add <1 x i32> [[TMP50]], [[TMP52]]
+; RM-NEXT:    [[TMP54:%.*]] = shufflevector <1 x i32> [[TMP53]], <1 x i32> undef, <3 x i32> <i32 0, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP55:%.*]] = shufflevector <3 x i32> [[TMP48]], <3 x i32> [[TMP54]], <3 x i32> <i32 0, i32 3, i32 2>
+; RM-NEXT:    [[BLOCK50:%.*]] = shufflevector <3 x i32> [[SPLIT3]], <3 x i32> undef, <1 x i32> <i32 2>
+; RM-NEXT:    [[TMP56:%.*]] = extractelement <2 x i32> [[SPLIT2]], i64 0
+; RM-NEXT:    [[SPLAT_SPLATINSERT51:%.*]] = insertelement <1 x i32> undef, i32 [[TMP56]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT52:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT51]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP57:%.*]] = mul <1 x i32> [[SPLAT_SPLAT52]], [[BLOCK50]]
+; RM-NEXT:    [[BLOCK53:%.*]] = shufflevector <3 x i32> [[SPLIT4]], <3 x i32> undef, <1 x i32> <i32 2>
+; RM-NEXT:    [[TMP58:%.*]] = extractelement <2 x i32> [[SPLIT2]], i64 1
+; RM-NEXT:    [[SPLAT_SPLATINSERT54:%.*]] = insertelement <1 x i32> undef, i32 [[TMP58]], i32 0
+; RM-NEXT:    [[SPLAT_SPLAT55:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT54]], <1 x i32> undef, <1 x i32> zeroinitializer
+; RM-NEXT:    [[TMP59:%.*]] = mul <1 x i32> [[SPLAT_SPLAT55]], [[BLOCK53]]
+; RM-NEXT:    [[TMP60:%.*]] = add <1 x i32> [[TMP57]], [[TMP59]]
+; RM-NEXT:    [[TMP61:%.*]] = shufflevector <1 x i32> [[TMP60]], <1 x i32> undef, <3 x i32> <i32 0, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP62:%.*]] = shufflevector <3 x i32> [[TMP55]], <3 x i32> [[TMP61]], <3 x i32> <i32 0, i32 1, i32 3>
+; RM-NEXT:    [[TMP63:%.*]] = shufflevector <3 x i32> [[TMP20]], <3 x i32> [[TMP41]], <6 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
+; RM-NEXT:    [[TMP64:%.*]] = shufflevector <3 x i32> [[TMP62]], <3 x i32> undef, <6 x i32> <i32 0, i32 1, i32 2, i32 undef, i32 undef, i32 undef>
+; RM-NEXT:    [[TMP65:%.*]] = shufflevector <6 x i32> [[TMP63]], <6 x i32> [[TMP64]], <9 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
+; RM-NEXT:    ret <9 x i32> [[TMP65]]
+;
+entry:
+  %c = call <9 x i32> @llvm.matrix.multiply.v6f64.v6f64.v6f64(<6 x i32> %a, <6 x i32> %b, i32 3, i32 2, i32 3)
+  ret <9 x i32> %c
+}
+
+declare <9 x i32> @llvm.matrix.multiply.v6f64.v6f64.v6f64(<6 x i32>, <6 x i32>, i32, i32, i32)
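
As a reading aid for the SPLIT shuffle masks in the 2x3 tests: with -matrix-default-layout=row-major, an R x C matrix flattened into a vector keeps element (row, col) at flat index row * C + col, so the 3x2 operand %a is split into rows {0,1}, {2,3}, {4,5} and the 2x3 operand %b into rows {0,1,2}, {3,4,5}, exactly the masks of SPLIT..SPLIT4 above. A small sketch of that mapping (flat_index is a hypothetical helper used only for illustration, not part of the patch):

#include <assert.h>

/* Flat position of element (row, col) in a row-major matrix with num_cols columns. */
static int flat_index(int row, int col, int num_cols) {
  return row * num_cols + col;
}

int main(void) {
  /* Row 1 of the 3x2 operand %a occupies flat elements 2 and 3,
     i.e. the <2 x i32> <i32 2, i32 3> mask used for SPLIT1. */
  assert(flat_index(1, 0, 2) == 2);
  assert(flat_index(1, 1, 2) == 3);
  /* Row 1 of the 2x3 operand %b starts at flat element 3 (SPLIT4's mask). */
  assert(flat_index(1, 0, 3) == 3);
  return 0;
}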

More information about the llvm-commits mailing list